2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
15 #include <mono/utils/mono-compiler.h>
30 #ifdef HAVE_SYS_TIME_H
38 #include <mono/utils/memcheck.h>
39 #include <mono/metadata/abi-details.h>
40 #include <mono/metadata/assembly.h>
41 #include <mono/metadata/attrdefs.h>
42 #include <mono/metadata/loader.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/class.h>
45 #include <mono/metadata/object.h>
46 #include <mono/metadata/exception.h>
47 #include <mono/metadata/opcodes.h>
48 #include <mono/metadata/mono-endian.h>
49 #include <mono/metadata/tokentype.h>
50 #include <mono/metadata/tabledefs.h>
51 #include <mono/metadata/marshal.h>
52 #include <mono/metadata/debug-helpers.h>
53 #include <mono/metadata/debug-internals.h>
54 #include <mono/metadata/gc-internals.h>
55 #include <mono/metadata/security-manager.h>
56 #include <mono/metadata/threads-types.h>
57 #include <mono/metadata/security-core-clr.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/monitor.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/utils/mono-error-internals.h>
63 #include <mono/metadata/mono-basic-block.h>
64 #include <mono/metadata/reflection-internals.h>
65 #include <mono/utils/mono-threads-coop.h>
71 #include "jit-icalls.h"
73 #include "debugger-agent.h"
74 #include "seq-points.h"
75 #include "aot-compiler.h"
76 #include "mini-llvm.h"
/* Relative cost weight charged per branch when estimating inline benefit. */
78 #define BRANCH_COST 10
/* Upper bound on the IL size of a callee considered for inlining. */
79 #define INLINE_LENGTH_LIMIT 20
/*
 * Compilation-failure macros used throughout the IL-to-IR loop.  Each one
 * records the failure on the MonoCompile (via mono_cfg_set_exception () or a
 * dedicated helper) and jumps to the shared exception_exit / mono_error_exit
 * label of the enclosing function.  They all reference 'cfg' implicitly, so
 * they are only usable where a MonoCompile *cfg is in scope.
 * NOTE(review): the closing "} while (0)" lines of these macros are not
 * visible in this extract.
 */
81 /* These have 'cfg' as an implicit argument */
82 #define INLINE_FAILURE(msg) do { \
83 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
84 inline_failure (cfg, msg); \
85 goto exception_exit; \
88 #define CHECK_CFG_EXCEPTION do {\
89 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
90 goto exception_exit; \
92 #define FIELD_ACCESS_FAILURE(method, field) do { \
93 field_access_failure ((cfg), (method), (field)); \
94 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
99 goto exception_exit; \
102 #define GSHAREDVT_FAILURE(opcode) do { \
103 if (cfg->gsharedvt) { \
104 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
105 goto exception_exit; \
108 #define OUT_OF_MEMORY_FAILURE do { \
109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
110 mono_error_set_out_of_memory (&cfg->error, ""); \
111 goto exception_exit; \
113 #define DISABLE_AOT(cfg) do { \
114 if ((cfg)->verbose_level >= 2) \
115 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
116 (cfg)->disable_aot = TRUE; \
118 #define LOAD_ERROR do { \
119 break_on_unverified (); \
120 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
121 goto exception_exit; \
124 #define TYPE_LOAD_ERROR(klass) do { \
125 cfg->exception_ptr = klass; \
129 #define CHECK_CFG_ERROR do {\
130 if (!mono_error_ok (&cfg->error)) { \
131 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
132 goto mono_error_exit; \
136 /* Determine whether 'ins' represents a load of the 'this' argument: the
 * method must have an implicit this, the instruction must be a plain OP_MOVE,
 * and its source must be the vreg holding argument 0. */
137 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
139 static int ldind_to_load_membase (int opcode);
140 static int stind_to_store_membase (int opcode);
142 int mono_op_to_op_imm (int opcode);
143 int mono_op_to_op_imm_noemul (int opcode);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always);
148 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
150 inline static MonoInst*
151 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg);
153 /* helper methods signatures */
/* Cached icall signatures, filled in by mono_create_helper_signatures () below. */
154 static MonoMethodSignature *helper_sig_domain_get;
155 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
156 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
157 static MonoMethodSignature *helper_sig_jit_thread_attach;
158 static MonoMethodSignature *helper_sig_get_tls_tramp;
159 static MonoMethodSignature *helper_sig_set_tls_tramp;
161 /* type loading helpers */
/* These macros expand to cached class-lookup accessor functions. */
162 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
163 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
166 * Instruction metadata
174 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
175 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
181 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
186 /* keep in sync with the enum in mini.h */
189 #include "mini-ops.h"
194 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
195 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
197 * This should contain the index of the last sreg + 1. This is not the same
198 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
200 const gint8 ins_sreg_counts[] = {
201 #include "mini-ops.h"
/* Public wrappers around the per-cfg virtual-register allocators.  Each
 * returns a fresh vreg of the requested kind (int/long/float/pointer). */
207 mono_alloc_ireg (MonoCompile *cfg)
209 return alloc_ireg (cfg);
213 mono_alloc_lreg (MonoCompile *cfg)
215 return alloc_lreg (cfg);
219 mono_alloc_freg (MonoCompile *cfg)
221 return alloc_freg (cfg);
225 mono_alloc_preg (MonoCompile *cfg)
227 return alloc_preg (cfg);
/* Allocate a destination vreg whose kind matches the given eval-stack type. */
231 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
233 return alloc_dreg (cfg, stack_type);
237 * mono_alloc_ireg_ref:
239 * Allocate an IREG, and mark it as holding a GC ref.
242 mono_alloc_ireg_ref (MonoCompile *cfg)
244 return alloc_ireg_ref (cfg);
248 * mono_alloc_ireg_mp:
250 * Allocate an IREG, and mark it as holding a managed pointer.
253 mono_alloc_ireg_mp (MonoCompile *cfg)
255 return alloc_ireg_mp (cfg);
259 * mono_alloc_ireg_copy:
261 * Allocate an IREG with the same GC type as VREG.
264 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking classification (ref / managed ptr / plain int)
 * of the source vreg onto the newly allocated one. */
266 if (vreg_is_ref (cfg, vreg))
267 return alloc_ireg_ref (cfg);
268 else if (vreg_is_mp (cfg, vreg))
269 return alloc_ireg_mp (cfg);
271 return alloc_ireg (cfg);
/*
 * Map a MonoType to the register-move opcode used to copy a value of that
 * type between vregs (OP_MOVE / OP_FMOVE / OP_RMOVE / ...).  Enums are
 * resolved to their base type and generic instances to their container's
 * byval type before classification; type variables recurse after looking up
 * the underlying shared type (valid only under generic sharing).
 */
275 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
280 type = mini_get_underlying_type (type);
282 switch (type->type) {
295 case MONO_TYPE_FNPTR:
297 case MONO_TYPE_CLASS:
298 case MONO_TYPE_STRING:
299 case MONO_TYPE_OBJECT:
300 case MONO_TYPE_SZARRAY:
301 case MONO_TYPE_ARRAY:
305 #if SIZEOF_REGISTER == 8
/* r4 values move via OP_RMOVE only when the backend keeps R4 in float regs. */
311 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
314 case MONO_TYPE_VALUETYPE:
315 if (type->data.klass->enumtype) {
316 type = mono_class_enum_basetype (type->data.klass);
319 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
322 case MONO_TYPE_TYPEDBYREF:
324 case MONO_TYPE_GENERICINST:
325 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
327 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only reach here under generic sharing. */
331 g_assert (cfg->gshared);
332 if (mini_type_var_is_vt (type))
335 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
337 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug helper: print a basic block's predecessor/successor edges
 * ("BB<num>(<dfn>)" for each) prefixed with MSG, then dump every
 * instruction in the block via mono_print_ins_index ().
 */
343 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 GString *str = g_string_new ("");
349 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
350 for (i = 0; i < bb->in_count; ++i)
351 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
352 g_string_append_printf (str, ", OUT: ");
353 for (i = 0; i < bb->out_count; ++i)
354 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
355 g_string_append_printf (str, " ]\n");
357 g_print ("%s", str->str);
358 g_string_free (str, TRUE);
/* Dump the block's instruction list after the edge summary. */
360 for (tree = bb->code; tree; tree = tree->next)
361 mono_print_ins_index (-1, tree);
/*
 * Initialize the cached icall signatures declared above.  The signature
 * strings are "<ret> <arg>..." descriptors parsed by
 * mono_create_icall_signature ().
 */
365 mono_create_helper_signatures (void)
367 helper_sig_domain_get = mono_create_icall_signature ("ptr");
368 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
369 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
370 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
371 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
372 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/* Debugging hook: trap here (when the 'break_on_unverified' debug option is
 * set) to catch verification failures at the point they are detected.
 * MONO_NEVER_INLINE keeps it a distinct breakpoint target. */
375 static MONO_NEVER_INLINE void
376 break_on_unverified (void)
378 if (mini_get_debug_options ()->break_on_unverified)
/* Record a FieldAccessException on the cfg, naming the inaccessible field
 * and the offending method in the error message. */
382 static MONO_NEVER_INLINE void
383 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
385 char *method_fname = mono_method_full_name (method, TRUE);
386 char *field_fname = mono_field_full_name (field);
387 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
388 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
389 g_free (method_fname);
390 g_free (field_fname);
/* Abort the current inlining attempt: log the reason (verbose >= 2) and mark
 * the cfg so the caller falls back to a normal call. */
393 static MONO_NEVER_INLINE void
394 inline_failure (MonoCompile *cfg, const char *msg)
396 if (cfg->verbose_level >= 2)
397 printf ("inline failed: %s\n", msg)
398 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/* Record a generic-sharing failure for the current method.
 * NOTE(review): the trailing '\' on the 'if' line below is a stray
 * line-continuation left over from when this was a macro — harmless (it only
 * splices the next line) but should be removed.
 * NOTE(review): the 'file' parameter is accepted but never printed here,
 * unlike gsharedvt_failure () below — possibly an oversight. */
401 static MONO_NEVER_INLINE void
402 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
404 if (cfg->verbose_level > 2) \
405 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
406 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record a gsharedvt (valuetype generic sharing) failure; the formatted
 * message is stored on the cfg (ownership: cfg->exception_message) and also
 * echoed when verbose. */
409 static MONO_NEVER_INLINE void
410 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
412 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
413 if (cfg->verbose_level >= 2)
414 printf ("%s\n", cfg->exception_message);
415 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
419 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
420 * foo<T> (int i) { ldarg.0; box T; }
422 #define UNVERIFIED do { \
423 if (cfg->gsharedvt) { \
424 if (cfg->verbose_level > 2) \
425 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
426 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
427 goto exception_exit; \
429 break_on_unverified (); \
/*
 * Look up (or lazily create and register) the basic block starting at IL
 * offset IP, using the cfg's cil_offset_to_bb cache.  Bails out via
 * UNVERIFIED when IP lies outside the method body.
 */
433 #define GET_BBLOCK(cfg,tblock,ip) do { \
434 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
436 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
437 NEW_BBLOCK (cfg, (tblock)); \
438 (tblock)->cil_code = (ip); \
439 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a fresh managed-pointer vreg. */
443 #if defined(TARGET_X86) || defined(TARGET_AMD64)
444 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
445 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
446 (dest)->dreg = alloc_ireg_mp ((cfg)); \
447 (dest)->sreg1 = (sr1); \
448 (dest)->sreg2 = (sr2); \
449 (dest)->inst_imm = (imm); \
450 (dest)->backend.shift_amount = (shift); \
451 MONO_ADD_INS ((cfg)->cbb, (dest)); \
455 /* Emit conversions so both operands of a binary opcode are of the same type */
/* Widens in place: R4 operands are promoted to R8 via OP_RCONV_TO_R8, and
 * (on 64-bit) an I4 second operand is sign-extended to pointer width when
 * paired with a STACK_PTR first operand.  The arg pointers are updated to
 * refer to the conversion results. */
457 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
459 MonoInst *arg1 = *arg1_ref;
460 MonoInst *arg2 = *arg2_ref;
463 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
464 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
467 /* Mixing r4/r8 is allowed by the spec */
468 if (arg1->type == STACK_R4) {
469 int dreg = alloc_freg (cfg);
471 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
472 conv->type = STACK_R8;
476 if (arg2->type == STACK_R4) {
477 int dreg = alloc_freg (cfg);
479 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
480 conv->type = STACK_R8;
486 #if SIZEOF_REGISTER == 8
487 /* FIXME: Need to add many more cases */
488 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
491 int dr = alloc_preg (cfg);
492 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
493 (ins)->sreg2 = widen->dreg;
498 #define ADD_BINOP(op) do { \
499 MONO_INST_NEW (cfg, ins, (op)); \
501 ins->sreg1 = sp [0]->dreg; \
502 ins->sreg2 = sp [1]->dreg; \
503 type_from_op (cfg, ins, sp [0], sp [1]); \
505 /* Have to insert a widening op */ \
506 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
507 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
508 MONO_ADD_INS ((cfg)->cbb, (ins)); \
509 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
512 #define ADD_UNOP(op) do { \
513 MONO_INST_NEW (cfg, ins, (op)); \
515 ins->sreg1 = sp [0]->dreg; \
516 type_from_op (cfg, ins, sp [0], NULL); \
518 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
519 MONO_ADD_INS ((cfg)->cbb, (ins)); \
520 *sp++ = mono_decompose_opcode (cfg, ins); \
523 #define ADD_BINCOND(next_block) do { \
526 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
527 cmp->sreg1 = sp [0]->dreg; \
528 cmp->sreg2 = sp [1]->dreg; \
529 type_from_op (cfg, cmp, sp [0], sp [1]); \
531 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
532 type_from_op (cfg, ins, sp [0], sp [1]); \
533 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
534 GET_BBLOCK (cfg, tblock, target); \
535 link_bblock (cfg, cfg->cbb, tblock); \
536 ins->inst_true_bb = tblock; \
537 if ((next_block)) { \
538 link_bblock (cfg, cfg->cbb, (next_block)); \
539 ins->inst_false_bb = (next_block); \
540 start_new_bblock = 1; \
542 GET_BBLOCK (cfg, tblock, ip); \
543 link_bblock (cfg, cfg->cbb, tblock); \
544 ins->inst_false_bb = tblock; \
545 start_new_bblock = 2; \
547 if (sp != stack_start) { \
548 handle_stack_args (cfg, stack_start, sp - stack_start); \
549 CHECK_UNVERIFIABLE (cfg); \
551 MONO_ADD_INS (cfg->cbb, cmp); \
552 MONO_ADD_INS (cfg->cbb, ins); \
556 * link_bblock: Links two basic blocks
558 * links two basic blocks in the control flow graph, the 'from'
559 * argument is the starting block and the 'to' argument is the block
560 * the control flow ends to after 'from'.
563 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
565 MonoBasicBlock **newa;
/* Verbose edge tracing; entry/exit blocks have no cil_code.
 * NOTE(review): the first format string mixes "IL%04x" and "IL_%04x" —
 * cosmetic inconsistency in the trace output. */
569 if (from->cil_code) {
571 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
573 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
576 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
578 printf ("edge from entry to exit\n");
/* Deduplicate: bail if the edge already exists in from's out-list. */
583 for (i = 0; i < from->out_count; ++i) {
584 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool arrays are append-only: copy + extend). */
590 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
591 for (i = 0; i < from->out_count; ++i) {
592 newa [i] = from->out_bb [i];
/* Mirror the same dedup-then-grow for to's in-list. */
600 for (i = 0; i < to->in_count; ++i) {
601 if (from == to->in_bb [i]) {
607 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
608 for (i = 0; i < to->in_count; ++i) {
609 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
618 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
620 link_bblock (cfg, from, to);
624 * mono_find_block_region:
626 * We mark each basic block with a region ID. We use that to avoid BB
627 * optimizations when blocks are in different regions.
630 * A region token that encodes where this region is, and information
631 * about the clause owner for this block.
633 * The region encodes the try/catch/filter clause that owns this block
634 * as well as the type. -1 is a special value that represents a block
635 * that is in none of try/catch/filter.
638 mono_find_block_region (MonoCompile *cfg, int offset)
640 MonoMethodHeader *header = cfg->header;
641 MonoExceptionClause *clause;
/* First pass: handler/filter bodies.  The token is (clause index + 1) << 8
 * combined with a MONO_REGION_* kind and the raw clause flags. */
644 for (i = 0; i < header->num_clauses; ++i) {
645 clause = &header->clauses [i];
646 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
647 (offset < (clause->handler_offset)))
648 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
650 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
651 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
652 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
653 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
654 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
656 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: try ranges (checked after handlers, since handler bodies may
 * lexically overlap an enclosing try). */
659 for (i = 0; i < header->num_clauses; ++i) {
660 clause = &header->clauses [i];
662 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
663 return ((i + 1) << 8) | clause->flags;
/* Return whether IL offset OFFSET falls inside the handler body of a
 * finally or fault clause of the current method. */
670 ip_in_finally_clause (MonoCompile *cfg, int offset)
672 MonoMethodHeader *header = cfg->header;
673 MonoExceptionClause *clause;
676 for (i = 0; i < header->num_clauses; ++i) {
677 clause = &header->clauses [i];
/* Skip clauses that are neither finally nor fault. */
678 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
681 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect the clauses of kind TYPE whose try range contains IP but not
 * TARGET — i.e. the handlers a branch/leave from IP to TARGET exits through.
 * Result is a GList owned by the caller. */
688 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
690 MonoMethodHeader *header = cfg->header;
691 MonoExceptionClause *clause;
695 for (i = 0; i < header->num_clauses; ++i) {
696 clause = &header->clauses [i];
697 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
698 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
699 if (clause->flags == type)
700 res = g_list_append (res, clause);
/* Get or lazily create the stack-pointer save variable for an exception
 * region, keyed by region token in cfg->spvars. */
707 mono_create_spvar_for_region (MonoCompile *cfg, int region)
711 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
715 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
716 /* prevent it from being register allocated */
717 var->flags |= MONO_INST_VOLATILE;
719 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for a handler
 * starting at IL offset OFFSET. */
723 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
725 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for the handler at
 * OFFSET; same caching pattern as the spvar helper above. */
729 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
733 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
737 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
738 /* prevent it from being register allocated */
739 var->flags |= MONO_INST_VOLATILE;
741 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
747 * Returns the type used in the eval stack when @type is loaded.
748 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
751 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
755 type = mini_get_underlying_type (type);
756 inst->klass = klass = mono_class_from_mono_type (type);
758 inst->type = STACK_MP;
763 switch (type->type) {
765 inst->type = STACK_INV;
773 inst->type = STACK_I4;
778 case MONO_TYPE_FNPTR:
779 inst->type = STACK_PTR;
781 case MONO_TYPE_CLASS:
782 case MONO_TYPE_STRING:
783 case MONO_TYPE_OBJECT:
784 case MONO_TYPE_SZARRAY:
785 case MONO_TYPE_ARRAY:
786 inst->type = STACK_OBJ;
790 inst->type = STACK_I8;
793 inst->type = cfg->r4_stack_type;
796 inst->type = STACK_R8;
798 case MONO_TYPE_VALUETYPE:
799 if (type->data.klass->enumtype) {
800 type = mono_class_enum_basetype (type->data.klass);
804 inst->type = STACK_VTYPE;
807 case MONO_TYPE_TYPEDBYREF:
808 inst->klass = mono_defaults.typed_reference_class;
809 inst->type = STACK_VTYPE;
811 case MONO_TYPE_GENERICINST:
812 type = &type->data.generic_class->container_class->byval_arg;
816 g_assert (cfg->gshared);
817 if (mini_is_gsharedvt_type (type)) {
818 g_assert (cfg->gsharedvt);
819 inst->type = STACK_VTYPE;
821 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
825 g_error ("unknown type 0x%02x in eval stack type", type->type);
830 * The following tables are used to quickly validate the IL code in type_from_op ().
833 bin_num_table [STACK_MAX] [STACK_MAX] = {
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
839 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
847 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
850 /* reduce the size of this table */
852 bin_int_table [STACK_MAX] [STACK_MAX] = {
853 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
864 bin_comp_table [STACK_MAX] [STACK_MAX] = {
865 /* Inv i L p F & O vt r4 */
867 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
868 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
869 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
870 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
871 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
872 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
873 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
874 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
877 /* reduce the size of this table */
879 shift_table [STACK_MAX] [STACK_MAX] = {
880 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
886 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
887 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
891 * Tables to map from the non-specific opcode to the matching
892 * type-specific opcode.
894 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
896 binops_op_map [STACK_MAX] = {
897 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
900 /* handles from CEE_NEG to CEE_CONV_U8 */
902 unops_op_map [STACK_MAX] = {
903 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
906 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
908 ovfops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
912 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
914 ovf2ops_op_map [STACK_MAX] = {
915 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
918 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
920 ovf3ops_op_map [STACK_MAX] = {
921 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
924 /* handles from CEE_BEQ to CEE_BLT_UN */
926 beqops_op_map [STACK_MAX] = {
927 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
930 /* handles from CEE_CEQ to CEE_CLT_UN */
932 ceqops_op_map [STACK_MAX] = {
933 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
937 * Sets ins->type (the type on the eval stack) according to the
938 * type of the opcode and the arguments to it.
939 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
941 * FIXME: this function sets ins->type unconditionally in some cases, but
942 * it should set it to invalid for some types (a conv.x on an object)
945 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
947 switch (ins->opcode) {
954 /* FIXME: check unverifiable args for STACK_MP */
955 ins->type = bin_num_table [src1->type] [src2->type];
956 ins->opcode += binops_op_map [ins->type];
963 ins->type = bin_int_table [src1->type] [src2->type];
964 ins->opcode += binops_op_map [ins->type];
969 ins->type = shift_table [src1->type] [src2->type];
970 ins->opcode += binops_op_map [ins->type];
975 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
976 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
977 ins->opcode = OP_LCOMPARE;
978 else if (src1->type == STACK_R4)
979 ins->opcode = OP_RCOMPARE;
980 else if (src1->type == STACK_R8)
981 ins->opcode = OP_FCOMPARE;
983 ins->opcode = OP_ICOMPARE;
985 case OP_ICOMPARE_IMM:
986 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
987 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
988 ins->opcode = OP_LCOMPARE_IMM;
1000 ins->opcode += beqops_op_map [src1->type];
1003 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1004 ins->opcode += ceqops_op_map [src1->type];
1010 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1011 ins->opcode += ceqops_op_map [src1->type];
1015 ins->type = neg_table [src1->type];
1016 ins->opcode += unops_op_map [ins->type];
1019 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1020 ins->type = src1->type;
1022 ins->type = STACK_INV;
1023 ins->opcode += unops_op_map [ins->type];
1029 ins->type = STACK_I4;
1030 ins->opcode += unops_op_map [src1->type];
1033 ins->type = STACK_R8;
1034 switch (src1->type) {
1037 ins->opcode = OP_ICONV_TO_R_UN;
1040 ins->opcode = OP_LCONV_TO_R_UN;
1044 case CEE_CONV_OVF_I1:
1045 case CEE_CONV_OVF_U1:
1046 case CEE_CONV_OVF_I2:
1047 case CEE_CONV_OVF_U2:
1048 case CEE_CONV_OVF_I4:
1049 case CEE_CONV_OVF_U4:
1050 ins->type = STACK_I4;
1051 ins->opcode += ovf3ops_op_map [src1->type];
1053 case CEE_CONV_OVF_I_UN:
1054 case CEE_CONV_OVF_U_UN:
1055 ins->type = STACK_PTR;
1056 ins->opcode += ovf2ops_op_map [src1->type];
1058 case CEE_CONV_OVF_I1_UN:
1059 case CEE_CONV_OVF_I2_UN:
1060 case CEE_CONV_OVF_I4_UN:
1061 case CEE_CONV_OVF_U1_UN:
1062 case CEE_CONV_OVF_U2_UN:
1063 case CEE_CONV_OVF_U4_UN:
1064 ins->type = STACK_I4;
1065 ins->opcode += ovf2ops_op_map [src1->type];
1068 ins->type = STACK_PTR;
1069 switch (src1->type) {
1071 ins->opcode = OP_ICONV_TO_U;
1075 #if SIZEOF_VOID_P == 8
1076 ins->opcode = OP_LCONV_TO_U;
1078 ins->opcode = OP_MOVE;
1082 ins->opcode = OP_LCONV_TO_U;
1085 ins->opcode = OP_FCONV_TO_U;
1091 ins->type = STACK_I8;
1092 ins->opcode += unops_op_map [src1->type];
1094 case CEE_CONV_OVF_I8:
1095 case CEE_CONV_OVF_U8:
1096 ins->type = STACK_I8;
1097 ins->opcode += ovf3ops_op_map [src1->type];
1099 case CEE_CONV_OVF_U8_UN:
1100 case CEE_CONV_OVF_I8_UN:
1101 ins->type = STACK_I8;
1102 ins->opcode += ovf2ops_op_map [src1->type];
1105 ins->type = cfg->r4_stack_type;
1106 ins->opcode += unops_op_map [src1->type];
1109 ins->type = STACK_R8;
1110 ins->opcode += unops_op_map [src1->type];
1113 ins->type = STACK_R8;
1117 ins->type = STACK_I4;
1118 ins->opcode += ovfops_op_map [src1->type];
1121 case CEE_CONV_OVF_I:
1122 case CEE_CONV_OVF_U:
1123 ins->type = STACK_PTR;
1124 ins->opcode += ovfops_op_map [src1->type];
1127 case CEE_ADD_OVF_UN:
1129 case CEE_MUL_OVF_UN:
1131 case CEE_SUB_OVF_UN:
1132 ins->type = bin_num_table [src1->type] [src2->type];
1133 ins->opcode += ovfops_op_map [src1->type];
1134 if (ins->type == STACK_R8)
1135 ins->type = STACK_INV;
1137 case OP_LOAD_MEMBASE:
1138 ins->type = STACK_PTR;
1140 case OP_LOADI1_MEMBASE:
1141 case OP_LOADU1_MEMBASE:
1142 case OP_LOADI2_MEMBASE:
1143 case OP_LOADU2_MEMBASE:
1144 case OP_LOADI4_MEMBASE:
1145 case OP_LOADU4_MEMBASE:
1146 ins->type = STACK_PTR;
1148 case OP_LOADI8_MEMBASE:
1149 ins->type = STACK_I8;
1151 case OP_LOADR4_MEMBASE:
1152 ins->type = cfg->r4_stack_type;
1154 case OP_LOADR8_MEMBASE:
1155 ins->type = STACK_R8;
1158 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1162 if (ins->type == STACK_MP)
1163 ins->klass = mono_defaults.object_class;
1168 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1174 param_table [STACK_MAX] [STACK_MAX] = {
1179 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1184 switch (args->type) {
1194 for (i = 0; i < sig->param_count; ++i) {
1195 switch (args [i].type) {
1199 if (!sig->params [i]->byref)
1203 if (sig->params [i]->byref)
1205 switch (sig->params [i]->type) {
1206 case MONO_TYPE_CLASS:
1207 case MONO_TYPE_STRING:
1208 case MONO_TYPE_OBJECT:
1209 case MONO_TYPE_SZARRAY:
1210 case MONO_TYPE_ARRAY:
1217 if (sig->params [i]->byref)
1219 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1228 /*if (!param_table [args [i].type] [sig->params [i]->type])
1236 * When we need a pointer to the current domain many times in a method, we
1237 * call mono_domain_get() once and we store the result in a local variable.
1238 * This function returns the variable that represents the MonoDomain*.
1240 inline static MonoInst *
1241 mono_get_domainvar (MonoCompile *cfg)
1243 if (!cfg->domainvar)
1244 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1245 return cfg->domainvar;
1249 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily created, and only meaningful when compiling AOT on backends that
 * need an explicit GOT variable. */
1253 mono_get_got_var (MonoCompile *cfg)
1255 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1257 if (!cfg->got_var) {
1258 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1260 return cfg->got_var;
/* Get or lazily create the variable holding the runtime generic context /
 * vtable; valid only under generic sharing. */
1264 mono_get_vtable_var (MonoCompile *cfg)
1266 g_assert (cfg->gshared);
1268 if (!cfg->rgctx_var) {
1269 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1270 /* force the var to be stack allocated */
1271 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1274 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an instruction's evaluation-stack type (STACK_*) back to a MonoType*.
 * For STACK_VTYPE the instruction's klass supplies the concrete type;
 * aborts via g_error on an unhandled stack type.
 */
1278 type_from_stack_type (MonoInst *ins) {
1279 switch (ins->type) {
1280 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1281 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1282 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1283 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1284 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* NOTE(review): the case label for this branch falls in a gap of the listing
 * (presumably STACK_MP); this_arg is the byref "this" type of the klass. */
1286 return &ins->klass->this_arg;
1287 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1288 case STACK_VTYPE: return &ins->klass->byval_arg;
1290 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Map a MonoType* to its evaluation-stack type (STACK_*), after stripping
 * enum/underlying-type indirection. Generic instances over valuetypes are
 * treated as valuetypes. Aborts on unhandled types.
 */
1295 static G_GNUC_UNUSED int
1296 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1298 t = mono_type_get_underlying_type (t);
1310 case MONO_TYPE_FNPTR:
1312 case MONO_TYPE_CLASS:
1313 case MONO_TYPE_STRING:
1314 case MONO_TYPE_OBJECT:
1315 case MONO_TYPE_SZARRAY:
1316 case MONO_TYPE_ARRAY:
/* R4 maps to a cfg-dependent stack type (soft-float backends differ). */
1322 return cfg->r4_stack_type;
1325 case MONO_TYPE_VALUETYPE:
1326 case MONO_TYPE_TYPEDBYREF:
1328 case MONO_TYPE_GENERICINST:
1329 if (mono_type_generic_inst_is_valuetype (t))
1335 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CIL ldelem/stelem opcode to the element class it accesses
 * (e.g. CEE_LDELEM_I1 -> sbyte, CEE_LDELEM_REF/CEE_STELEM_REF -> object).
 * Aborts on opcodes with no fixed element class.
 */
1342 array_access_to_klass (int opcode)
1346 return mono_defaults.byte_class;
1348 return mono_defaults.uint16_class;
1351 return mono_defaults.int_class;
1354 return mono_defaults.sbyte_class;
1357 return mono_defaults.int16_class;
1360 return mono_defaults.int32_class;
1362 return mono_defaults.uint32_class;
1365 return mono_defaults.int64_class;
1368 return mono_defaults.single_class;
1371 return mono_defaults.double_class;
1372 case CEE_LDELEM_REF:
1373 case CEE_STELEM_REF:
1374 return mono_defaults.object_class;
1376 g_assert_not_reached ();
1382 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable to carry a stack value of SP-slot SLOT across a
 * basic-block boundary, reusing a previously created variable for the same
 * (slot, stack-type) pair when one exists in cfg->intvars.
 */
1385 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1390 /* inlining can result in deeper stacks */
1391 if (slot >= cfg->header->max_stack)
1392 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache by stack type and slot. */
1394 pos = ins->type - 1 + slot * STACK_MAX;
1396 switch (ins->type) {
1403 if ((vnum = cfg->intvars [pos]))
1404 return cfg->varinfo [vnum];
1405 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1406 cfg->intvars [pos] = res->inst_c0;
/* Fallback for stack types that are not cached (see gap in listing). */
1409 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When AOT compiling, record the (image, token) pair used to load KEY so the
 * AOT compiler can later re-resolve it without a generic context.
 */
1415 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1418 * Don't use this if a generic_context is set, since that means AOT can't
1419 * look up the method using just the image+token.
1420 * table == 0 means this is a reference made from a wrapper.
1422 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1423 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1424 jump_info_token->image = image;
1425 jump_info_token->token = token;
1426 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1431 * This function is called to handle items that are left on the evaluation stack
1432 * at basic block boundaries. What happens is that we save the values to local variables
1433 * and we reload them later when first entering the target basic block (with the
1434 * handle_loaded_temps () function).
1435 * A single joint point will use the same variables (stored in the array bb->out_stack or
1436 * bb->in_stack, if the basic block is before or after the joint point).
1438 * This function needs to be called _before_ emitting the last instruction of
1439 * the bb (i.e. before emitting a branch).
1440 * If the stack merge fails at a join point, cfg->unverifiable is set.
1443 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1446 MonoBasicBlock *bb = cfg->cbb;
1447 MonoBasicBlock *outb;
1448 MonoInst *inst, **locals;
1453 if (cfg->verbose_level > 3)
1454 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time through: decide which variables will carry the out-stack. */
1455 if (!bb->out_scount) {
1456 bb->out_scount = count;
1457 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack variables. */
1459 for (i = 0; i < bb->out_count; ++i) {
1460 outb = bb->out_bb [i];
1461 /* exception handlers are linked, but they should not be considered for stack args */
1462 if (outb->flags & BB_EXCEPTION_HANDLER)
1464 //printf (" %d", outb->block_num);
1465 if (outb->in_stack) {
1467 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh carrier variables. */
1473 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1474 for (i = 0; i < count; ++i) {
1476 * try to reuse temps already allocated for this purpouse, if they occupy the same
1477 * stack slot and if they are of the same type.
1478 * This won't cause conflicts since if 'local' is used to
1479 * store one of the values in the in_stack of a bblock, then
1480 * the same variable will be used for the same outgoing stack
1482 * This doesn't work when inlining methods, since the bblocks
1483 * in the inlined methods do not inherit their in_stack from
1484 * the bblock they are inlined to. See bug #58863 for an
1487 if (cfg->inlined_method)
1488 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1490 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables as the in_stack of each successor. */
1495 for (i = 0; i < bb->out_count; ++i) {
1496 outb = bb->out_bb [i];
1497 /* exception handlers are linked, but they should not be considered for stack args */
1498 if (outb->flags & BB_EXCEPTION_HANDLER)
1500 if (outb->in_scount) {
/* Join point with mismatched stack depth: flag as unverifiable IL. */
1501 if (outb->in_scount != bb->out_scount) {
1502 cfg->unverifiable = TRUE;
1505 continue; /* check they are the same locals */
1507 outb->in_scount = count;
1508 outb->in_stack = bb->out_stack;
1511 locals = bb->out_stack;
/* Spill each stack value into its carrier variable. */
1513 for (i = 0; i < count; ++i) {
1514 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1515 inst->cil_code = sp [i]->cil_code;
1516 sp [i] = locals [i];
1517 if (cfg->verbose_level > 3)
1518 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1522 * It is possible that the out bblocks already have in_stack assigned, and
1523 * the in_stacks differ. In this case, we will store to all the different
1530 /* Find a bblock which has a different in_stack */
1532 while (bindex < bb->out_count) {
1533 outb = bb->out_bb [bindex];
1534 /* exception handlers are linked, but they should not be considered for stack args */
1535 if (outb->flags & BB_EXCEPTION_HANDLER) {
1539 if (outb->in_stack != locals) {
/* Store to this successor's distinct variable set too. */
1540 for (i = 0; i < count; ++i) {
1541 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1542 inst->cil_code = sp [i]->cil_code;
1543 sp [i] = locals [i];
1544 if (cfg->verbose_level > 3)
1545 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1547 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 * Emit IR that loads a runtime constant identified by (patch_type, data).
 * Under AOT this becomes a GOT/patch reference; under the JIT the patch is
 * resolved immediately and emitted as a plain pointer constant.
 */
1557 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1561 if (cfg->compile_aot) {
1562 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1568 ji.type = patch_type;
1569 ji.data.target = data;
/* Resolve eagerly; must succeed at compile time for the JIT path. */
1570 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1571 mono_error_assert_ok (&error);
1573 EMIT_NEW_PCONST (cfg, ins, target);
/* Public wrapper around emit_runtime_constant (). */
1579 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1581 return emit_runtime_constant (cfg, patch_type, data);
/*
 * mini_emit_memset:
 * Emit inline IR that sets SIZE bytes at [destreg + offset] to VAL.
 * Only VAL == 0 is supported (asserted). Small aligned sizes use a single
 * store-immediate; larger regions loop over register-sized stores, then
 * progressively smaller widths for the tail.
 */
1585 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1589 g_assert (val == 0);
/* Fast path: a single store when the region fits one aligned register store. */
1594 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1597 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1600 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1603 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1605 #if SIZEOF_REGISTER == 8
1607 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register once, then store repeatedly. */
1613 val_reg = alloc_preg (cfg);
1615 if (SIZEOF_REGISTER == 8)
1616 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1618 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1621 /* This could be optimized further if neccesary */
/* Unaligned destination: fall back to byte stores. */
1623 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1630 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1632 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1637 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: 4-, 2-, then 1-byte stores for the remaining bytes. */
1644 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1649 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1654 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying SIZE bytes from [srcreg + soffset] to
 * [destreg + doffset]. Uses the widest loads/stores the alignment and
 * backend allow (8, then 4, 2, 1 bytes). SIZE is asserted < 10000 to
 * bound code expansion; larger copies must use a helper call instead.
 */
1661 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1668 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1669 g_assert (size < 10000);
1672 /* This could be optimized further if neccesary */
/* Unaligned case: copy byte by byte. */
1674 cur_reg = alloc_preg (cfg);
1675 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks where the backend tolerates unaligned 64-bit access. */
1683 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1685 cur_reg = alloc_preg (cfg);
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies for whatever remains. */
1695 cur_reg = alloc_preg (cfg);
1696 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1697 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1703 cur_reg = alloc_preg (cfg);
1704 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1711 cur_reg = alloc_preg (cfg);
1712 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1713 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * mono_create_fast_tls_getter:
 * Build (but do not emit) an OP_TLS_GET instruction reading TLS slot KEY
 * directly, when the platform supports fast TLS and the offset is known.
 * Returns NULL under AOT or when fast TLS is unavailable; callers must
 * then fall back to the helper-call path (see mono_create_tls_get).
 */
1721 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1723 int tls_offset = mono_tls_get_tls_offset (key);
/* TLS offsets are process-specific, so they can never be baked into AOT code. */
1725 if (cfg->compile_aot)
1728 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1730 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1731 ins->dreg = mono_alloc_preg (cfg);
1732 ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 * Build (but do not emit) an OP_TLS_SET instruction writing VALUE into TLS
 * slot KEY. Mirrors mono_create_fast_tls_getter: returns NULL under AOT or
 * when fast TLS is unavailable.
 */
1739 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1741 int tls_offset = mono_tls_get_tls_offset (key);
/* TLS offsets cannot be baked into AOT images. */
1743 if (cfg->compile_aot)
1746 if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1748 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1749 ins->sreg1 = value->dreg;
1750 ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 * Emit IR that reads TLS slot KEY: the fast inline OP_TLS_GET when
 * available, otherwise an indirect call through a TLS trampoline (AOT)
 * or a direct icall to the TLS getter (JIT).
 */
1758 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1760 MonoInst *fast_tls = NULL;
1762 if (!mini_get_debug_options ()->use_fallback_tls)
1763 fast_tls = mono_create_fast_tls_getter (cfg, key);
1766 MONO_ADD_INS (cfg->cbb, fast_tls);
1770 if (cfg->compile_aot) {
1773 * tls getters are critical pieces of code and we don't want to resolve them
1774 * through the standard plt/tramp mechanism since we might expose ourselves
1775 * to crashes and infinite recursions.
1777 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1778 return mono_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1780 gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1781 return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 * Emit IR that writes VALUE into TLS slot KEY. Same fast/fallback split
 * as mono_create_tls_get: inline OP_TLS_SET when possible, otherwise a
 * TLS-trampoline calli (AOT) or an icall to the setter (JIT).
 */
1786 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1788 MonoInst *fast_tls = NULL;
1790 if (!mini_get_debug_options ()->use_fallback_tls)
1791 fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1794 MONO_ADD_INS (cfg->cbb, fast_tls);
1798 if (cfg->compile_aot) {
1800 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1801 return mono_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1803 gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1804 return mono_emit_jit_icall (cfg, setter, &value);
1811 * Emit IR to push the current LMF onto the LMF stack.
1814 emit_push_lmf (MonoCompile *cfg)
1817 * Emit IR to push the LMF:
1818 * lmf_addr = <lmf_addr from tls>
1819 * lmf->lmf_addr = lmf_addr
1820 * lmf->prev_lmf = *lmf_addr
1823 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS (lmf_ir_mono_lmf). */
1828 if (cfg->lmf_ir_mono_lmf) {
1829 MonoInst *lmf_vara_ins, *lmf_ins;
1830 /* Load current lmf */
1831 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF);
1833 EMIT_NEW_VARLOADA (cfg, lmf_vara_ins, cfg->lmf_var, NULL);
1834 /* Save previous_lmf */
1835 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_vara_ins->dreg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make this method's LMF the TLS-current one. */
1837 mono_create_tls_set (cfg, lmf_vara_ins, TLS_KEY_LMF);
1839 int lmf_reg, prev_lmf_reg;
1841 * Store lmf_addr in a variable, so it can be allocated to a global register.
1843 if (!cfg->lmf_addr_var)
1844 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Compute &jit_tls->lmf from the JIT TLS block. */
1847 ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1849 int jit_tls_dreg = ins->dreg;
1851 lmf_reg = alloc_preg (cfg);
1852 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Alternate path (gap in listing): read the lmf address slot directly. */
1854 lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1857 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1859 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1860 lmf_reg = ins->dreg;
1862 prev_lmf_reg = alloc_preg (cfg);
1863 /* Save previous_lmf */
1864 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1865 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link: *lmf_addr = lmf (push onto the LMF stack). */
1867 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1874 * Emit IR to pop the current LMF from the LMF stack.
1877 emit_pop_lmf (MonoCompile *cfg)
1879 int lmf_reg, lmf_addr_reg;
1885 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1886 lmf_reg = ins->dreg;
/* TLS-resident LMF: restore previous_lmf straight into the TLS slot. */
1888 if (cfg->lmf_ir_mono_lmf) {
1889 /* Load previous_lmf */
1890 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, alloc_preg (cfg), lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1892 mono_create_tls_set (cfg, ins, TLS_KEY_LMF);
1896 * Emit IR to pop the LMF:
1897 * *(lmf->lmf_addr) = lmf->prev_lmf
1899 /* This could be called before emit_push_lmf () */
1900 if (!cfg->lmf_addr_var)
1901 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1902 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1904 prev_lmf_reg = alloc_preg (cfg);
1905 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1906 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to FUNC with the current method as
 * argument, but only for the outermost method and only when enter/leave
 * profiling is enabled.
 */
1911 emit_instrumentation_call (MonoCompile *cfg, void *func)
1913 MonoInst *iargs [1];
1916 * Avoid instrumenting inlined methods since it can
1917 * distort profiling results.
1919 if (cfg->method != cfg->current_method)
1922 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1923 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1924 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call IR opcode for a call returning TYPE, in one of three
 * forms: indirect (CALLI -> *_REG), virtual (-> *_MEMBASE), or direct.
 * The opcode family encodes the return kind (void/int/long/float/vtype).
 */
1929 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1932 type = mini_get_underlying_type (type);
1933 switch (type->type) {
1934 case MONO_TYPE_VOID:
1935 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1942 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1946 case MONO_TYPE_FNPTR:
1947 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1948 case MONO_TYPE_CLASS:
1949 case MONO_TYPE_STRING:
1950 case MONO_TYPE_OBJECT:
1951 case MONO_TYPE_SZARRAY:
1952 case MONO_TYPE_ARRAY:
1953 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1956 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
/* R4 vs R8 (gap in listing hides the case labels). */
1959 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1961 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1963 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1964 case MONO_TYPE_VALUETYPE:
/* Enums retry with their underlying integer type. */
1965 if (type->data.klass->enumtype) {
1966 type = mono_class_enum_basetype (type->data.klass);
1969 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1970 case MONO_TYPE_TYPEDBYREF:
1971 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1972 case MONO_TYPE_GENERICINST:
/* Generic instances retry with the container class's open type. */
1973 type = &type->data.generic_class->container_class->byval_arg;
1976 case MONO_TYPE_MVAR:
1978 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1980 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1985 //XXX this ignores if t is byref
/* True for primitive scalar types: BOOLEAN..U8 plus native I/U. */
1986 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1989 * target_type_is_incompatible:
1990 * @cfg: MonoCompile context
1992 * Check that the item @arg on the evaluation stack can be stored
1993 * in the target type (can be a local, or field, etc).
1994 * The cfg arg can be used to check if we need verification or just
1997 * Returns: non-0 value if arg can't be stored on a target.
2000 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2002 MonoType *simple_type;
/* Byref targets: accept managed pointers with compatible pointee types. */
2005 if (target->byref) {
2006 /* FIXME: check that the pointed to types match */
2007 if (arg->type == STACK_MP) {
2008 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2009 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2010 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2012 /* if the target is native int& or same type */
2013 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2016 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2017 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2018 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2022 if (arg->type == STACK_PTR)
/* Non-byref targets: compare the lowered target type against the stack kind. */
2027 simple_type = mini_get_underlying_type (target);
2028 switch (simple_type->type) {
2029 case MONO_TYPE_VOID:
2037 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2041 /* STACK_MP is needed when setting pinned locals */
2042 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2047 case MONO_TYPE_FNPTR:
2049 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2050 * in native int. (#688008).
2052 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2055 case MONO_TYPE_CLASS:
2056 case MONO_TYPE_STRING:
2057 case MONO_TYPE_OBJECT:
2058 case MONO_TYPE_SZARRAY:
2059 case MONO_TYPE_ARRAY:
2060 if (arg->type != STACK_OBJ)
2062 /* FIXME: check type compatibility */
2066 if (arg->type != STACK_I8)
2070 if (arg->type != cfg->r4_stack_type)
2074 if (arg->type != STACK_R8)
2077 case MONO_TYPE_VALUETYPE:
2078 if (arg->type != STACK_VTYPE)
2080 klass = mono_class_from_mono_type (simple_type);
2081 if (klass != arg->klass)
2084 case MONO_TYPE_TYPEDBYREF:
2085 if (arg->type != STACK_VTYPE)
2087 klass = mono_class_from_mono_type (simple_type);
2088 if (klass != arg->klass)
2091 case MONO_TYPE_GENERICINST:
2092 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2093 MonoClass *target_class;
2094 if (arg->type != STACK_VTYPE)
2096 klass = mono_class_from_mono_type (simple_type);
2097 target_class = mono_class_from_mono_type (target);
2098 /* The second cases is needed when doing partial sharing */
2099 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2103 if (arg->type != STACK_OBJ)
2105 /* FIXME: check type compatibility */
/* Generic type variables: only reachable under generic sharing. */
2109 case MONO_TYPE_MVAR:
2110 g_assert (cfg->gshared);
2111 if (mini_type_var_is_vt (simple_type)) {
2112 if (arg->type != STACK_VTYPE)
2115 if (arg->type != STACK_OBJ)
2120 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2126 * Prepare arguments for passing to a function call.
2127 * Return a non-zero value if the arguments can't be passed to the given
2129 * The type checks are not yet complete and some conversions may need
2130 * casts on 32 or 64 bit architectures.
2132 * FIXME: implement this using target_type_is_incompatible ()
2135 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2137 MonoType *simple_type;
/* The receiver (args[0]) must be an object/managed/native pointer. */
2141 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2145 for (i = 0; i < sig->param_count; ++i) {
2146 if (sig->params [i]->byref) {
2147 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2151 simple_type = mini_get_underlying_type (sig->params [i]);
/* handle_enum: retried after lowering enums/generic instances below. */
2153 switch (simple_type->type) {
2154 case MONO_TYPE_VOID:
2163 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2169 case MONO_TYPE_FNPTR:
2170 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2173 case MONO_TYPE_CLASS:
2174 case MONO_TYPE_STRING:
2175 case MONO_TYPE_OBJECT:
2176 case MONO_TYPE_SZARRAY:
2177 case MONO_TYPE_ARRAY:
2178 if (args [i]->type != STACK_OBJ)
2183 if (args [i]->type != STACK_I8)
2187 if (args [i]->type != cfg->r4_stack_type)
2191 if (args [i]->type != STACK_R8)
2194 case MONO_TYPE_VALUETYPE:
/* Enums are checked against their underlying integer type. */
2195 if (simple_type->data.klass->enumtype) {
2196 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2199 if (args [i]->type != STACK_VTYPE)
2202 case MONO_TYPE_TYPEDBYREF:
2203 if (args [i]->type != STACK_VTYPE)
2206 case MONO_TYPE_GENERICINST:
2207 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2210 case MONO_TYPE_MVAR:
2212 if (args [i]->type != STACK_VTYPE)
2216 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * Aborts on opcodes outside the call family.
 */
2224 callvirt_to_call (int opcode)
2227 case OP_CALL_MEMBASE:
2229 case OP_VOIDCALL_MEMBASE:
2231 case OP_FCALL_MEMBASE:
2233 case OP_RCALL_MEMBASE:
2235 case OP_VCALL_MEMBASE:
2237 case OP_LCALL_MEMBASE:
2240 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 * Map a *_MEMBASE (virtual) call opcode to its indirect (*_REG)
 * counterpart. Aborts on opcodes outside the call family.
 */
2247 callvirt_to_call_reg (int opcode)
2250 case OP_CALL_MEMBASE:
2252 case OP_VOIDCALL_MEMBASE:
2253 return OP_VOIDCALL_REG;
2254 case OP_FCALL_MEMBASE:
2255 return OP_FCALL_REG;
2256 case OP_RCALL_MEMBASE:
2257 return OP_RCALL_REG;
2258 case OP_VCALL_MEMBASE:
2259 return OP_VCALL_REG;
2260 case OP_LCALL_MEMBASE:
2261 return OP_LCALL_REG;
2263 g_assert_not_reached ();
2269 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Arrange for the IMT/method argument of CALL to be passed in the
 * architecture's IMT register: either copy IMT_ARG's value or materialize
 * METHOD as a runtime constant. The LLVM path additionally records the
 * register in call->imt_arg_reg.
 */
2271 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2275 if (COMPILE_LLVM (cfg)) {
2277 method_reg = alloc_preg (cfg);
2278 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2280 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2281 method_reg = ins->dreg;
2285 call->imt_arg_reg = method_reg;
2287 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path (gap in listing hides the #else/#endif structure). */
2292 method_reg = alloc_preg (cfg);
2293 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2295 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2296 method_reg = ins->dreg;
2299 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo patch record of TYPE targeting TARGET from MP. */
2302 static MonoJumpInfo *
2303 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2305 MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2309 ji->data.target = target;
/* cfg-aware wrapper: report how KLASS uses generic context (gshared only). */
2315 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2318 return mono_class_check_context_used (klass);
/* cfg-aware wrapper: report how METHOD uses generic context (gshared only). */
2324 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2327 return mono_method_check_context_used (method);
2333 * check_method_sharing:
2335 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2338 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2340 gboolean pass_vtable = FALSE;
2341 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods on generic classes may need the vtable. */
2343 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2344 (mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2345 gboolean sharable = FALSE;
2347 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2351 * Pass vtable iff target method might
2352 * be shared, which means that sharing
2353 * is enabled for its class and its
2354 * context is sharable (and it's not a
2357 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst set) take an mrgctx instead of a vtable. */
2361 if (mini_method_get_context (cmethod) &&
2362 mini_method_get_context (cmethod)->method_inst) {
2363 g_assert (!pass_vtable);
2365 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2368 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2373 if (out_pass_vtable)
2374 *out_pass_vtable = pass_vtable;
2375 if (out_pass_mrgctx)
2376 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG with ARGS, selecting the call opcode from
 * the (calli, virtual_, tail) flags, wiring up valuetype return handling,
 * soft-float argument conversion, and backend argument marshalling.
 * The instruction is created but not added to a basic block by this
 * function; the caller emits it.
 */
2379 inline static MonoCallInst *
2380 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2381 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2385 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls fire the leave-profiler event here, since control won't return. */
2393 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2395 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2397 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2400 call->signature = sig;
2401 call->rgctx_reg = rgctx;
2402 sig_ret = mini_get_underlying_type (sig->ret);
2404 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return, path 1: return through the caller-provided vret_addr. */
2407 if (mini_type_is_vtype (sig_ret)) {
2408 call->vret_var = cfg->vret_addr;
2409 //g_assert_not_reached ();
/* Valuetype return, path 2: return into a fresh temporary. */
2411 } else if (mini_type_is_vtype (sig_ret)) {
2412 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2415 temp->backend.is_pinvoke = sig->pinvoke;
2418 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2419 * address of return value to increase optimization opportunities.
2420 * Before vtype decomposition, the dreg of the call ins itself represents the
2421 * fact the call modifies the return value. After decomposition, the call will
2422 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2423 * will be transformed into an LDADDR.
2425 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2426 loada->dreg = alloc_preg (cfg);
2427 loada->inst_p0 = temp;
2428 /* We reference the call too since call->dreg could change during optimization */
2429 loada->inst_p1 = call;
2430 MONO_ADD_INS (cfg->cbb, loada);
2432 call->inst.dreg = temp->dreg;
2434 call->vret_var = loada;
2435 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2436 call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2438 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2439 if (COMPILE_SOFT_FLOAT (cfg)) {
2441 * If the call has a float argument, we would need to do an r8->r4 conversion using
2442 * an icall, but that cannot be done during the call sequence since it would clobber
2443 * the call registers + the stack. So we do it before emitting the call.
2445 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2447 MonoInst *in = call->args [i];
2449 if (i >= sig->hasthis)
2450 t = sig->params [i - sig->hasthis];
2452 t = &mono_defaults.int_class->byval_arg;
2453 t = mono_type_get_underlying_type (t);
2455 if (!t->byref && t->type == MONO_TYPE_R4) {
2456 MonoInst *iargs [1];
2460 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2462 /* The result will be in an int vreg */
2463 call->args [i] = conv;
2469 call->need_unbox_trampoline = unbox_trampoline;
/* Hand the call to the LLVM or native backend to lay out out-args. */
2472 if (COMPILE_LLVM (cfg))
2473 mono_llvm_emit_call (cfg, call);
2475 mono_arch_emit_call (cfg, call);
2477 mono_arch_emit_call (cfg, call);
2480 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2481 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Route the rgctx argument through the dedicated MONO_ARCH_RGCTX_REG and
 * mark the cfg/call as using it. */
2487 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2489 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2490 cfg->uses_rgctx_reg = TRUE;
2491 call->rgctx_reg = TRUE;
2493 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG. Optionally passes
 * an IMT argument and an rgctx argument in their dedicated registers.
 * For pinvoke wrappers with calling-convention checking enabled, brackets
 * the call with SP capture/compare and raises ExecutionEngineException on
 * stack imbalance.
 */
2497 inline static MonoInst*
2498 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2503 gboolean check_sp = FALSE;
2505 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2506 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2508 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value now; its home register may be clobbered below. */
2513 rgctx_reg = mono_alloc_preg (cfg);
2514 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Record SP before the call so it can be validated afterwards. */
2518 if (!cfg->stack_inbalance_var)
2519 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2521 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2522 ins->dreg = cfg->stack_inbalance_var->dreg;
2523 MONO_ADD_INS (cfg->cbb, ins);
2526 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2528 call->inst.sreg1 = addr->dreg;
2531 emit_imt_argument (cfg, call, NULL, imt_arg);
2533 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* After the call: compare current SP against the saved value. */
2538 sp_reg = mono_alloc_preg (cfg);
2540 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2542 MONO_ADD_INS (cfg->cbb, ins);
2544 /* Restore the stack so we don't crash when throwing the exception */
2545 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2546 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2547 MONO_ADD_INS (cfg->cbb, ins);
2549 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2550 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2554 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2556 return (MonoInst*)call;
2560 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2563 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit IR for a call to METHOD with signature SIG.  THIS_INS is the
 * receiver (NULL for non-virtual/static dispatch), IMT_ARG and RGCTX_ARG are
 * the optional hidden arguments.  Returns the emitted call instruction.
 * Handles: remoting wrappers, the delegate Invoke fast path, devirtualization
 * of final/non-virtual methods, indirect dispatch through CALL_TARGET, and
 * IMT/vtable-based virtual dispatch.
 */
2566 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2567 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2569 #ifndef DISABLE_REMOTING
2570 gboolean might_be_remote = FALSE;
2572 gboolean virtual_ = this_ins != NULL;
2573 gboolean enable_for_aot = TRUE;
2576 MonoInst *call_target = NULL;
2578 gboolean need_unbox_trampoline;
2581 sig = mono_method_signature (method);
/* llvm-only interface calls must go through emit_llvmonly_virtual_call */
2583 if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2584 g_assert_not_reached ();
2587 rgctx_reg = mono_alloc_preg (cfg);
2588 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2591 if (method->string_ctor) {
2592 /* Create the real signature */
2593 /* FIXME: Cache these */
2594 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2595 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2600 context_used = mini_method_check_context_used (cfg, method);
2602 #ifndef DISABLE_REMOTING
2603 might_be_remote = this_ins && sig->hasthis &&
2604 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2605 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2607 if (might_be_remote && context_used) {
2610 g_assert (cfg->gshared);
2612 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2614 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2618 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2619 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2621 need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2623 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2625 #ifndef DISABLE_REMOTING
2626 if (might_be_remote)
2627 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2630 call->method = method;
2631 call->inst.flags |= MONO_INST_HAS_METHOD;
2632 call->inst.inst_left = this_ins;
2633 call->tail_call = tail;
2636 int vtable_reg, slot_reg, this_reg;
2639 this_reg = this_ins->dreg;
2641 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2642 MonoInst *dummy_use;
2644 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2646 /* Make a call to delegate->invoke_impl */
2647 call->inst.inst_basereg = this_reg;
2648 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2649 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2651 /* We must emit a dummy use here because the delegate trampoline will
2652 replace the 'this' argument with the delegate target making this activation
2653 no longer a root for the delegate.
2654 This is an issue for delegates that target collectible code such as dynamic
2655 methods of GC'able assemblies.
2657 For a test case look into #667921.
2659 FIXME: a dummy use is not the best way to do it as the local register allocator
2660 will put it on a caller save register and spil it around the call.
2661 Ideally, we would either put it on a callee save register or only do the store part.
2663 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2665 return (MonoInst*)call;
2668 if ((!cfg->compile_aot || enable_for_aot) &&
2669 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2670 (MONO_METHOD_IS_FINAL (method) &&
2671 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2672 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2674 * the method is not virtual, we just need to ensure this is not null
2675 * and then we can call the method directly.
2677 #ifndef DISABLE_REMOTING
2678 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2680 * The check above ensures method is not gshared, this is needed since
2681 * gshared methods can't have wrappers.
2683 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2687 if (!method->string_ctor)
2688 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2690 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2691 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2693 * the method is virtual, but we can statically dispatch since either
2694 * it's class or the method itself are sealed.
2695 * But first we need to ensure it's not a null reference.
2697 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2699 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2700 } else if (call_target) {
2701 vtable_reg = alloc_preg (cfg);
2702 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2704 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2705 call->inst.sreg1 = call_target->dreg;
/* Clear only MONO_INST_HAS_METHOD: the call goes through a register, not a
 * known method.  The previous code used logical NOT ('&= !MONO_INST_HAS_METHOD'),
 * which evaluates to '&= 0' and wiped out every flag on the instruction. */
2706 call->inst.flags &= ~MONO_INST_HAS_METHOD;
2708 vtable_reg = alloc_preg (cfg);
2709 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2710 if (mono_class_is_interface (method->klass)) {
2711 guint32 imt_slot = mono_method_get_imt_slot (method);
2712 emit_imt_argument (cfg, call, call->method, imt_arg);
2713 slot_reg = vtable_reg;
/* IMT table sits at negative offsets below the vtable pointer */
2714 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2716 slot_reg = vtable_reg;
2717 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2718 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2720 g_assert (mono_method_signature (method)->generic_param_count);
2721 emit_imt_argument (cfg, call, call->method, imt_arg);
2725 call->inst.sreg1 = slot_reg;
2726 call->inst.inst_offset = offset;
2727 call->is_virtual = TRUE;
2731 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2734 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2736 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD using the method's own
 * signature, with no IMT or rgctx hidden arguments.
 */
2740 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2742 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct (non-virtual, non-tail) call to the native address FUNC
 * with signature SIG and return the call instruction.
 */
2746 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2753 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2756 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2758 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall registered for address FUNC, going through
 * its managed wrapper (mono_icall_get_wrapper).
 */
2762 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2764 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2768 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2772 * mono_emit_abs_call:
2774 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2776 inline static MonoInst*
2777 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2778 MonoMethodSignature *sig, MonoInst **args)
2780 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2784 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2787 if (cfg->abs_patches == NULL)
2788 cfg->abs_patches = g_hash_table_new (NULL, NULL);
/* key == value: abs_patches is used as a set keyed by the patch info pointer */
2789 g_hash_table_insert (cfg->abs_patches, ji, ji);
2790 ins = mono_emit_native_call (cfg, ji, sig, args);
/* tell the backend that the "address" is really a patch to be resolved */
2791 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * sig_to_rgctx_sig:
 *   Return a copy of SIG extended with one trailing pointer-sized parameter,
 * used for passing an rgctx/extra argument on indirect calls.
 * NOTE(review): allocated with g_malloc and (per the FIXME below) apparently
 * never freed -- confirm callers/ownership before changing this.
 */
2795 static MonoMethodSignature*
2796 sig_to_rgctx_sig (MonoMethodSignature *sig)
2798 // FIXME: memory allocation
2799 MonoMethodSignature *res;
2802 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2803 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2804 res->param_count = sig->param_count + 1;
2805 for (i = 0; i < sig->param_count; ++i)
2806 res->params [i] = sig->params [i];
/* the extra argument is typed as a native int pointer */
2807 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2811 /* Make an indirect call to FSIG passing an additional argument */
2813 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2815 MonoMethodSignature *csig;
/* small fixed buffer avoids a mempool allocation in the common case */
2816 MonoInst *args_buf [16];
2818 int i, pindex, tmp_reg;
2820 /* Make a call with an rgctx/extra arg */
2821 if (fsig->param_count + 2 < 16)
2824 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
2827 args [pindex ++] = orig_args [0];
2828 for (i = 0; i < fsig->param_count; ++i)
2829 args [pindex ++] = orig_args [fsig->hasthis + i];
/* append the extra argument, copied into a fresh vreg, as the last parameter */
2830 tmp_reg = alloc_preg (cfg);
2831 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
2832 csig = sig_to_rgctx_sig (fsig);
2833 return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2836 /* Emit an indirect call to the function descriptor ADDR */
2838 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2840 int addr_reg, arg_reg;
2841 MonoInst *call_target;
/* this path is only valid in llvm-only (bitcode) mode */
2843 g_assert (cfg->llvm_only);
2846 * addr points to a <addr, arg> pair, load both of them, and
2847 * make a call to addr, passing arg as an extra arg.
2849 addr_reg = alloc_preg (cfg);
2850 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2851 arg_reg = alloc_preg (cfg);
2852 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2854 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *   Whether icalls may be called directly instead of through their managed
 * wrapper.  Disabled for LLVM (non-llvm-only) compiles, when SDB sequence
 * points are generated, or when explicitly turned off on the cfg.
 */
2858 direct_icalls_enabled (MonoCompile *cfg)
2860 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2862 if (cfg->compile_llvm && !cfg->llvm_only)
2865 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *   Emit a call to the icall described by INFO; when safe (no_raise and
 * direct icalls enabled) inline its wrapper, otherwise call the wrapper.
 */
2871 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2874 * Call the jit icall without a wrapper if possible.
2875 * The wrapper is needed for the following reasons:
2876 * - to handle exceptions thrown using mono_raise_exceptions () from the
2877 * icall function. The EH code needs the lmf frame pushed by the
2878 * wrapper to be able to unwind back to managed code.
2879 * - to be able to do stack walks for asynchronously suspended
2880 * threads when debugging.
2882 if (info->no_raise && direct_icalls_enabled (cfg)) {
2886 if (!info->wrapper_method) {
2887 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2888 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* publish wrapper_method before other threads may observe the cached pointer */
2890 mono_memory_barrier ();
2894 * Inline the wrapper method, which is basically a call to the C icall, and
2895 * an exception check.
2897 costs = inline_method (cfg, info->wrapper_method, NULL,
2898 args, NULL, il_offset, TRUE);
2899 g_assert (costs > 0);
2900 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* fallback: call through the icall wrapper */
2904 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *   Widen small integer results of pinvoke/LLVM calls: native code may
 * return sub-register-sized integers without initializing the upper bits,
 * so an explicit conversion is emitted for I1/U1/I2/U2 returns.
 */
2909 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2911 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2912 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2916 * Native code might return non register sized integers
2917 * without initializing the upper bits.
2919 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2920 case OP_LOADI1_MEMBASE:
2921 widen_op = OP_ICONV_TO_I1;
2923 case OP_LOADU1_MEMBASE:
2924 widen_op = OP_ICONV_TO_U1;
2926 case OP_LOADI2_MEMBASE:
2927 widen_op = OP_ICONV_TO_I2;
2929 case OP_LOADU2_MEMBASE:
2930 widen_op = OP_ICONV_TO_U2;
2936 if (widen_op != -1) {
2937 int dreg = alloc_preg (cfg);
2940 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* keep the original stack type on the widened value */
2941 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *   Emit IR that throws a method-access error for CALLER accessing CALLEE
 * (via the mono_throw_method_access icall).
 */
2952 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2954 MonoInst *args [16];
2956 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2957 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2959 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 *   Return the corlib String.memcpy helper, looked up once and cached in a
 * function-local static.
 */
2963 get_memcpy_method (void)
2965 static MonoMethod *memcpy_method = NULL;
2966 if (!memcpy_method) {
2967 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2969 g_error ("Old corlib found. Install a new one");
2971 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively set bits in *WB_BITMAP for the pointer-sized slots of KLASS
 * (starting at byte OFFSET) that hold object references, so callers can
 * emit write barriers only where needed.  Static fields are skipped.
 */
2975 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2977 MonoClassField *field;
2978 gpointer iter = NULL;
2980 while ((field = mono_class_get_fields (klass, &iter))) {
2983 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the (absent) MonoObject header; strip it */
2985 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2986 if (mini_type_is_reference (mono_field_get_type (field))) {
2987 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2988 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
/* embedded struct: recurse only if it can contain references */
2990 MonoClass *field_class = mono_class_from_mono_type (field->type);
2991 if (field_class->has_references)
2992 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR.  Picks, in order:
 * the backend's OP_CARD_TABLE_WBARRIER opcode, inline card marking, or a
 * call to the generic write barrier method.  No-op when write barriers are
 * disabled on the cfg.
 */
2998 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3000 int card_table_shift_bits;
3001 gpointer card_table_mask;
3003 MonoInst *dummy_use;
3004 int nursery_shift_bits;
3005 size_t nursery_size;
3007 if (!cfg->gen_write_barriers)
3010 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3012 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3014 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3017 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3018 wbarrier->sreg1 = ptr->dreg;
3019 wbarrier->sreg2 = value->dreg;
3020 MONO_ADD_INS (cfg->cbb, wbarrier);
3021 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3022 int offset_reg = alloc_preg (cfg);
/* inline card marking: card_table [(ptr >> shift) & mask] = 1 */
3026 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3027 if (card_table_mask)
3028 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3030 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3031 * IMM's larger than 32bits.
3033 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3034 card_reg = ins->dreg;
3036 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3037 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3039 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3040 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier */
3043 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an unrolled valuetype copy of KLASS (SIZE bytes, alignment
 * ALIGN) that issues write barriers only for the reference-holding slots
 * computed by create_write_barrier_bitmap.  iargs [0]/[1] hold the dest/src
 * addresses.  For larger copies it falls back to the
 * mono_gc_wbarrier_value_copy_bitmap icall.
 */
3047 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3049 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3050 unsigned need_wb = 0;
3055 /*types with references can't have alignment smaller than sizeof(void*) */
3056 if (align < SIZEOF_VOID_P)
3059 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3060 if (size > 32 * SIZEOF_VOID_P)
3063 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3065 /* We don't unroll more than 5 stores to avoid code bloat. */
3066 if (size > 5 * SIZEOF_VOID_P) {
3067 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3068 size += (SIZEOF_VOID_P - 1);
3069 size &= ~(SIZEOF_VOID_P - 1);
3071 EMIT_NEW_ICONST (cfg, iargs [2], size);
3072 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3073 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3077 destreg = iargs [0]->dreg;
3078 srcreg = iargs [1]->dreg;
3081 dest_ptr_reg = alloc_preg (cfg);
3082 tmp_reg = alloc_preg (cfg);
3085 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* copy one pointer-sized slot at a time, barriering slots flagged in need_wb */
3087 while (size >= SIZEOF_VOID_P) {
3088 MonoInst *load_inst;
3089 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3090 load_inst->dreg = tmp_reg;
3091 load_inst->inst_basereg = srcreg;
3092 load_inst->inst_offset = offset;
3093 MONO_ADD_INS (cfg->cbb, load_inst);
3095 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3098 emit_write_barrier (cfg, iargs [0], load_inst);
3100 offset += SIZEOF_VOID_P;
3101 size -= SIZEOF_VOID_P;
3104 /*tmp += sizeof (void*)*/
3105 if (size >= SIZEOF_VOID_P) {
3106 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3107 MONO_ADD_INS (cfg->cbb, iargs [0]);
3111 /* Those cannot be references since size < sizeof (void*) */
3113 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3114 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3120 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3121 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3127 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3128 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3137 * Emit code to copy a valuetype of type @klass whose address is stored in
3138 * @src->dreg to memory whose address is stored at @dest->dreg.
3141 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3143 MonoInst *iargs [4];
3146 MonoMethod *memcpy_method;
3147 MonoInst *size_ins = NULL;
3148 MonoInst *memcpy_ins = NULL;
3152 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3155 * This check breaks with spilled vars... need to handle it during verification anyway.
3156 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: the size and memcpy helper are only known at runtime */
3159 if (mini_is_gsharedvt_klass (klass)) {
3161 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3162 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3166 n = mono_class_native_size (klass, &align);
3168 n = mono_class_value_size (klass, &align);
3170 /* if native is true there should be no references in the struct */
3171 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3172 /* Avoid barriers when storing to the stack */
3173 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3174 (dest->opcode == OP_LDADDR))) {
3180 context_used = mini_class_check_context_used (cfg, klass);
3182 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3183 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3185 } else if (context_used) {
3186 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3188 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3189 if (!cfg->compile_aot)
3190 mono_class_compute_gc_descriptor (klass);
3194 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3196 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no references: a plain memcpy (inline for small sizes) is sufficient */
3201 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3202 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3203 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3208 iargs [2] = size_ins;
3210 EMIT_NEW_ICONST (cfg, iargs [2], n);
3212 memcpy_method = get_memcpy_method ();
3214 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3216 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return the corlib String.memset helper, looked up once and cached in a
 * function-local static.
 */
3221 get_memset_method (void)
3223 static MonoMethod *memset_method = NULL;
3224 if (!memset_method) {
3225 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3227 g_error ("Old corlib found. Install a new one");
3229 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit IR to zero-initialize the valuetype of type KLASS at DEST->dreg.
 * Uses a gsharedvt bzero helper when the size is only known at runtime,
 * an inline memset for small sizes, or the corlib memset helper otherwise.
 */
3233 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3235 MonoInst *iargs [3];
3238 MonoMethod *memset_method;
3239 MonoInst *size_ins = NULL;
3240 MonoInst *bzero_ins = NULL;
3241 static MonoMethod *bzero_method;
3243 /* FIXME: Optimize this for the case when dest is an LDADDR */
3244 mono_class_init (klass);
3245 if (mini_is_gsharedvt_klass (klass)) {
3246 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3247 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3249 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3250 g_assert (bzero_method);
3252 iargs [1] = size_ins;
3253 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3257 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3259 n = mono_class_value_size (klass, &align);
/* small types are cleared inline */
3261 if (n <= sizeof (gpointer) * 8) {
3262 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3265 memset_method = get_memset_method ();
3267 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3268 EMIT_NEW_ICONST (cfg, iargs [2], n);
3269 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3276 * Emit IR to return either the this pointer for instance method,
3277 * or the mrgctx for static methods.
3280 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3282 MonoInst *this_ins = NULL;
3284 g_assert (cfg->gshared);
/* non-static, non-valuetype, no method context: 'this' carries the rgctx */
3286 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3287 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3288 !method->klass->valuetype)
3289 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
3291 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3292 MonoInst *mrgctx_loc, *mrgctx_var;
3294 g_assert (!this_ins);
3295 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3297 mrgctx_loc = mono_get_vtable_var (cfg);
3298 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3301 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3302 MonoInst *vtable_loc, *vtable_var;
3304 g_assert (!this_ins);
3306 vtable_loc = mono_get_vtable_var (cfg);
3307 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* the var holds an mrgctx: dereference to reach the class vtable */
3309 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3310 MonoInst *mrgctx_var = vtable_var;
3313 vtable_reg = alloc_preg (cfg);
3314 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3315 vtable_var->type = STACK_PTR;
/* instance method: load the vtable out of 'this' */
3323 vtable_reg = alloc_preg (cfg);
3324 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from mempool MP) an rgctx-entry descriptor for METHOD with the
 * given patch type/data and rgctx info type.
 */
3329 static MonoJumpInfoRgctxEntry *
3330 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3332 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3333 res->method = method;
3334 res->in_mrgctx = in_mrgctx;
3335 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3336 res->data->type = patch_type;
3337 res->data->data.target = patch_data;
3338 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *   Emit an inline fetch of rgctx entry ENTRY from RGCTX: walk the rgctx
 * array chain and load the slot; if any link or the slot itself is NULL,
 * fall back to the mono_fill_{method,class}_rgctx icalls.
 */
3343 static inline MonoInst*
3344 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3346 MonoInst *args [16];
3349 // FIXME: No fastpath since the slot is not a compile time constant
3351 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3352 if (entry->in_mrgctx)
3353 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3355 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3359 * FIXME: This can be called during decompose, which is a problem since it creates
3361 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3363 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3365 MonoBasicBlock *is_null_bb, *end_bb;
3366 MonoInst *res, *ins, *call;
3369 slot = mini_get_rgctx_entry_slot (entry);
3371 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3372 index = MONO_RGCTX_SLOT_INDEX (slot);
3374 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* find how deep in the chained rgctx arrays the slot lives */
3375 for (depth = 0; ; ++depth) {
3376 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3378 if (index < size - 1)
3383 NEW_BBLOCK (cfg, end_bb);
3384 NEW_BBLOCK (cfg, is_null_bb);
3387 rgctx_reg = rgctx->dreg;
3389 rgctx_reg = alloc_preg (cfg);
3391 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3392 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3393 NEW_BBLOCK (cfg, is_null_bb);
3395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3396 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3399 for (i = 0; i < depth; ++i) {
3400 int array_reg = alloc_preg (cfg);
3402 /* load ptr to next array */
3403 if (mrgctx && i == 0)
3404 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT)
3406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3407 rgctx_reg = array_reg;
3408 /* is the ptr null? */
3409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3410 /* if yes, jump to actual trampoline */
3411 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3415 val_reg = alloc_preg (cfg);
3416 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3417 /* is the slot null? */
3418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3419 /* if yes, jump to actual trampoline */
3420 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3423 res_reg = alloc_preg (cfg);
3424 MONO_INST_NEW (cfg, ins, OP_MOVE);
3425 ins->dreg = res_reg;
3426 ins->sreg1 = val_reg;
3427 MONO_ADD_INS (cfg->cbb, ins);
3429 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slow path: ask the runtime to fill the slot */
3432 MONO_START_BB (cfg, is_null_bb);
3434 EMIT_NEW_ICONST (cfg, args [1], index);
3436 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3438 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3439 MONO_INST_NEW (cfg, ins, OP_MOVE);
3440 ins->dreg = res_reg;
3441 ins->sreg1 = call->dreg;
3442 MONO_ADD_INS (cfg->cbb, ins);
3443 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3445 MONO_START_BB (cfg, end_bb);
3454 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3457 static inline MonoInst*
3458 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3461 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
/* otherwise go through the lazy-fetch trampoline */
3463 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * mini_emit_get_rgctx_klass:
 *   Emit IR loading the RGCTX_TYPE info for KLASS from the rgctx.
 */
3467 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3468 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3470 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3471 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3473 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR loading the RGCTX_TYPE info for signature SIG from the rgctx.
 */
3477 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3478 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3480 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3481 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3483 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR loading gsharedvt call info (SIG + CMETHOD) from the rgctx.
 */
3487 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3488 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3490 MonoJumpInfoGSharedVtCall *call_info;
3491 MonoJumpInfoRgctxEntry *entry;
3494 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3495 call_info->sig = sig;
3496 call_info->method = cmethod;
3498 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3499 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3501 return emit_rgctx_fetch (cfg, rgctx, entry);
3505 * emit_get_rgctx_virt_method:
3507 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3510 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3511 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3513 MonoJumpInfoVirtMethod *info;
3514 MonoJumpInfoRgctxEntry *entry;
3517 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3518 info->klass = klass;
3519 info->method = virt_method;
3521 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3522 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3524 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR loading the gsharedvt per-method INFO for CMETHOD from the rgctx.
 */
3528 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3529 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3531 MonoJumpInfoRgctxEntry *entry;
3534 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3535 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3537 return emit_rgctx_fetch (cfg, rgctx, entry);
3541 * emit_get_rgctx_method:
3543 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3544 * normal constants, else emit a load from the rgctx.
3547 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3548 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3550 if (!context_used) {
3553 switch (rgctx_type) {
3554 case MONO_RGCTX_INFO_METHOD:
3555 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3557 case MONO_RGCTX_INFO_METHOD_RGCTX:
3558 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* only the two info types above are valid on the constant path */
3561 g_assert_not_reached ();
3564 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3565 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3567 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR loading the RGCTX_TYPE info for FIELD from the rgctx.
 */
3572 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3573 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3575 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3576 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3578 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *   Return the index of the gsharedvt-info entry for (DATA, RGCTX_TYPE),
 * reusing an existing entry when possible and growing the (mempool-backed)
 * entries array on demand.
 */
3582 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3584 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3585 MonoRuntimeGenericContextInfoTemplate *template_;
/* LOCAL_OFFSET entries are never deduplicated */
3590 for (i = 0; i < info->num_entries; ++i) {
3591 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3593 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* grow the entries array (doubling, starting at 16) */
3597 if (info->num_entries == info->count_entries) {
3598 MonoRuntimeGenericContextInfoTemplate *new_entries;
3599 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3601 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3603 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3604 info->entries = new_entries;
3605 info->count_entries = new_count_entries;
3608 idx = info->num_entries;
3609 template_ = &info->entries [idx];
3610 template_->info_type = rgctx_type;
3611 template_->data = data;
3613 info->num_entries ++;
3619 * emit_get_gsharedvt_info:
3621 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3624 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3629 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3630 /* Load info->entries [idx] */
3631 dreg = alloc_preg (cfg);
3632 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: gsharedvt info lookup keyed by KLASS's byval type. */
3638 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3640 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3644 * On return the caller must check @klass for load errors.
3647 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3649 MonoInst *vtable_arg;
3652 context_used = mini_class_check_context_used (cfg, klass);
/* gshared: the vtable must be fetched from the rgctx at runtime */
3655 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3656 klass, MONO_RGCTX_INFO_VTABLE);
3658 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3662 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3665 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3669 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3670 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3672 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3673 ins->sreg1 = vtable_arg->dreg;
3674 MONO_ADD_INS (cfg->cbb, ins);
/* explicit IR: test vtable->initialized, call the icall only when unset */
3677 MonoBasicBlock *inited_bb;
3678 MonoInst *args [16];
3680 inited_reg = alloc_ireg (cfg);
3682 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3684 NEW_BBLOCK (cfg, inited_bb);
3686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3687 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3689 args [0] = vtable_arg;
3690 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3692 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point (debugger stop location) for IP if sequence-point
 * generation is enabled and METHOD is the method being compiled (i.e. not an
 * inlined callee). INTR_LOC / NONEMPTY_STACK qualify the generated point.
 */
3697 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3701 if (cfg->gen_seq_points && cfg->method == method) {
3702 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3704 ins->flags |= MONO_INST_NONEMPTY_STACK;
3705 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When --debug=casts is active, record the source and target class of a
 * pending cast into the JIT TLS area so a failing cast can produce a detailed
 * error message. Emits nothing when the debug option is off. If NULL_CHECK is
 * requested the stores are skipped for a null OBJ_REG.
 */
3710 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3712 if (mini_get_debug_options ()->better_cast_details) {
3713 int vtable_reg = alloc_preg (cfg);
3714 int klass_reg = alloc_preg (cfg);
3715 MonoBasicBlock *is_null_bb = NULL;
3717 int to_klass_reg, context_used;
/* Skip the detail stores when the object is null. */
3720 NEW_BBLOCK (cfg, is_null_bb);
3722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3723 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3726 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* NOTE(review): stray '.' after '\n' in the message below — likely a typo; fix belongs with the full source. */
3728 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* from-class: obj->vtable->klass */
3732 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* to-class: from the RGCTX in shared generic code, otherwise a class constant. */
3737 context_used = mini_class_check_context_used (cfg, klass);
3739 MonoInst *class_ins;
3741 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3742 to_klass_reg = class_ins->dreg;
3744 to_klass_reg = alloc_preg (cfg);
3745 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3747 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3750 MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *
 *   Clear the cast-detail record written by mini_save_cast_details () once the
 * cast has succeeded. Only the 'from' field needs clearing to mark the record
 * invalid.
 */
3755 mini_reset_cast_details (MonoCompile *cfg)
3757 /* Reset the variables holding the cast details */
3758 if (mini_get_debug_options ()->better_cast_details) {
3759 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3760 /* It is enough to reset the from field */
3761 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3766 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise. The comparison strategy
 * depends on the compilation mode (shared, gshared, AOT, JIT).
 */
3769 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3771 int vtable_reg = alloc_preg (cfg);
3774 context_used = mini_class_check_context_used (cfg, array_class);
3776 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on OBJ. */
3778 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: compare klass pointers via a runtime constant (vtables are per-domain). */
3780 if (cfg->opt & MONO_OPT_SHARED) {
3781 int class_reg = alloc_preg (cfg);
3784 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3785 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3786 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
/* Shared generic code: fetch the expected vtable through the RGCTX. */
3787 } else if (context_used) {
3788 MonoInst *vtable_ins;
3790 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3791 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* AOT: vtable address must be a patchable constant in a register. */
3793 if (cfg->compile_aot) {
3797 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3799 vt_reg = alloc_preg (cfg);
3800 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3801 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* Plain JIT: the vtable address can be compared as an immediate. */
3804 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3806 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3810 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3812 mini_reset_cast_details (cfg);
3816 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3817 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Implement unboxing of a Nullable<T> by calling the generated
 * Nullable<T>.Unbox () method; returns the call instruction. In shared
 * generic code the method address comes from the RGCTX; otherwise a direct
 * (possibly vtable-carrying) call is emitted.
 */
3820 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3822 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3825 MonoInst *rgctx, *addr;
3827 /* FIXME: What if the class is shared? We might not
3828 have to get the address of the method from the
3830 addr = emit_get_rgctx_method (cfg, context_used, method,
3831 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
/* llvm-only mode: indirect calls go through emit_llvmonly_calli; remember the signature for later wrapper generation. */
3832 if (cfg->llvm_only) {
3833 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3834 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3836 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3838 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: possibly pass the vtable as an extra argument, per sharing rules. */
3841 gboolean pass_vtable, pass_mrgctx;
3842 MonoInst *rgctx_arg = NULL;
3844 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3845 g_assert (!pass_mrgctx);
3848 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3851 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3854 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for CEE_UNBOX: verify at run time that the object on the stack
 * (sp [0]) is a boxed KLASS (InvalidCastException otherwise) and produce the
 * address of the value-type payload (object address + sizeof (MonoObject)).
 */
3859 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3863 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3864 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3865 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3866 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3868 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3869 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3870 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3872 /* FIXME: generics */
3873 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a non-array klass. */
3876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3877 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3879 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: the expected element class is a runtime value from the RGCTX. */
3883 MonoInst *element_class;
3885 /* This assertion is from the unboxcast insn */
3886 g_assert (klass->rank == 0);
3888 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3889 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3891 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3892 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: compare against the known element class, with cast-detail bookkeeping for --debug=casts. */
3894 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3895 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3896 mini_reset_cast_details (cfg);
/* The unboxed value lives right after the MonoObject header. */
3899 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3900 MONO_ADD_INS (cfg->cbb, add);
3901 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete layout is only
 * known at run time. Branches on the class's box type (ref / nullable /
 * vtype) and in every case leaves the address of the value in ADDR_REG, from
 * which the result is finally loaded.
 */
3908 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3910 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3911 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3915 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime castclass check before unboxing. */
3921 args [1] = klass_inst;
3924 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
/* Three-way dispatch on the runtime box type of KLASS. */
3926 NEW_BBLOCK (cfg, is_ref_bb);
3927 NEW_BBLOCK (cfg, is_nullable_bb);
3928 NEW_BBLOCK (cfg, end_bb);
3929 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3930 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3931 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3933 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3934 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3936 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3937 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: payload sits right after the MonoObject header. */
3941 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3942 MONO_ADD_INS (cfg->cbb, addr);
3944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3947 MONO_START_BB (cfg, is_ref_bb);
3949 /* Save the ref to a temporary */
3950 dreg = alloc_ireg (cfg);
3951 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3952 addr->dreg = addr_reg;
3953 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3954 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3957 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox () through a hand-built gsharedvt signature. */
3960 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3961 MonoInst *unbox_call;
3962 MonoMethodSignature *unbox_sig;
3964 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3965 unbox_sig->ret = &klass->byval_arg;
3966 unbox_sig->param_count = 1;
3967 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3970 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3972 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3974 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3975 addr->dreg = addr_reg;
3978 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3981 MONO_START_BB (cfg, end_bb);
/* All paths converge: load the value through the computed address. */
3984 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3990 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates a new instance of KLASS (optionally for a box
 * operation). Chooses among a managed allocator, specialized JIT icalls and
 * the generic allocation function depending on sharing mode, AOT and GC
 * configuration.
 */
3993 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3995 MonoInst *iargs [2];
/* Shared generic code: the klass/vtable argument is a runtime value from the RGCTX. */
4000 MonoRgctxInfoType rgctx_info;
4001 MonoInst *iargs [2];
4002 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4004 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4006 if (cfg->opt & MONO_OPT_SHARED)
4007 rgctx_info = MONO_RGCTX_INFO_KLASS;
4009 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4010 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4012 if (cfg->opt & MONO_OPT_SHARED) {
4013 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4015 alloc_ftn = ves_icall_object_new;
4018 alloc_ftn = ves_icall_object_new_specific;
/* Prefer the inlined GC managed allocator when the instance size is known. */
4021 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4022 if (known_instance_size) {
4023 int size = mono_class_instance_size (klass);
4024 if (size < sizeof (MonoObject))
4025 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4027 EMIT_NEW_ICONST (cfg, iargs [1], size);
4029 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4032 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
4035 if (cfg->opt & MONO_OPT_SHARED) {
4036 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4037 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4039 alloc_ftn = ves_icall_object_new;
4040 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
4041 /* This happens often in argument checking code, eg. throw new FooException... */
4042 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4043 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4044 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4046 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4047 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: surface a type-load exception to the caller. */
4051 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4052 cfg->exception_ptr = klass;
4056 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4058 if (managed_alloc) {
4059 int size = mono_class_instance_size (klass);
4060 if (size < sizeof (MonoObject))
4061 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4063 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4064 EMIT_NEW_ICONST (cfg, iargs [1], size);
4065 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Generic allocator; some variants take the length in words (pass_lw). */
4067 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4069 guint32 lw = vtable->klass->instance_size;
4070 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4071 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4072 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4075 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4079 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4083 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR for CEE_BOX of VAL as KLASS. Nullable<T> boxes through the
 * generated Box () method; gsharedvt types branch at run time on the box
 * type (ref / nullable / vtype); plain value types allocate and store.
 */
4086 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4088 MonoInst *alloc, *ins;
/* Nullable<T>: boxing is implemented by calling Nullable<T>.Box (). */
4090 if (mono_class_is_nullable (klass)) {
4091 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4094 if (cfg->llvm_only && cfg->gsharedvt) {
4095 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4096 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4097 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4099 /* FIXME: What if the class is shared? We might not
4100 have to get the method address from the RGCTX. */
4101 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4102 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4103 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
4105 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared Nullable path: direct call, optionally passing the vtable. */
4108 gboolean pass_vtable, pass_mrgctx;
4109 MonoInst *rgctx_arg = NULL;
4111 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4112 g_assert (!pass_mrgctx);
4115 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4118 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4121 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: runtime dispatch on the box type, mirroring handle_unbox_gsharedvt. */
4125 if (mini_is_gsharedvt_klass (klass)) {
4126 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4127 MonoInst *res, *is_ref, *src_var, *addr;
4130 dreg = alloc_ireg (cfg);
4132 NEW_BBLOCK (cfg, is_ref_bb);
4133 NEW_BBLOCK (cfg, is_nullable_bb);
4134 NEW_BBLOCK (cfg, end_bb);
4135 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4137 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and store the value after the object header. */
4143 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4146 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4147 ins->opcode = OP_STOREV_MEMBASE;
4149 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4150 res->type = STACK_OBJ;
4152 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4155 MONO_START_BB (cfg, is_ref_bb);
4157 /* val is a vtype, so has to load the value manually */
4158 src_var = get_vreg_to_inst (cfg, val->dreg);
4160 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4161 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4162 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4163 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4166 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Box () through a hand-built gsharedvt signature. */
4169 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4170 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4172 MonoMethodSignature *box_sig;
4175 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4176 * construct that method at JIT time, so have to do things by hand.
4178 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4179 box_sig->ret = &mono_defaults.object_class->byval_arg;
4180 box_sig->param_count = 1;
4181 box_sig->params [0] = &klass->byval_arg;
4184 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4186 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4187 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4188 res->type = STACK_OBJ;
4192 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4194 MONO_START_BB (cfg, end_bb);
/* Ordinary value type: allocate, store payload, done. */
4198 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4202 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls are known not to
 * raise exceptions and are therefore safe to call directly (see
 * icall_is_direct_callable ()). Published with a memory barrier; read lock-free. */
4207 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be invoked directly (without a
 * wrapper). Only a small whitelist of corlib classes plus System.Math
 * qualifies, because a direct icall must never raise an exception.
 */
4210 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4212 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4213 if (!direct_icalls_enabled (cfg))
4217 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4218 * Whitelist a few icalls for now.
/* One-time lazy init of the whitelist; the barrier orders the inserts before publication. */
4220 if (!direct_icall_type_hash) {
4221 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4223 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4224 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4225 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4226 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4227 mono_memory_barrier ();
4228 direct_icall_type_hash = h;
4231 if (cmethod->klass == mono_defaults.math_class)
4233 /* No locking needed */
4234 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether CMETHOD performs a stack walk at run time (and thus needs
 * an identifiable caller frame). System.Type:GetType is checked here; the
 * full list may continue past this excerpt.
 */
4240 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4242 if (cmethod->klass == mono_defaults.systemtype_class) {
4243 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Emit an intrinsic expansion of Enum.HasFlag: load the enum value from
 * ENUM_THIS, AND it with ENUM_FLAG, and compare the result back against
 * ENUM_FLAG, yielding an I4 boolean. Uses 32- or 64-bit ops depending on the
 * enum's underlying type.
 */
4249 static G_GNUC_UNUSED MonoInst*
4250 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4252 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4253 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Decide 32-bit vs 64-bit handling from the underlying integer type. */
4256 switch (enum_type->type) {
4259 #if SIZEOF_REGISTER == 8
/* (value & flag) == flag  =>  HasFlag */
4271 MonoInst *load, *and_, *cmp, *ceq;
4272 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4273 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4274 int dest_reg = alloc_ireg (cfg);
4276 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4277 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4278 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4279 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4281 ceq->type = STACK_I4;
/* Decompose compound opcodes for backends that need it. */
4284 load = mono_decompose_opcode (cfg, load);
4285 and_ = mono_decompose_opcode (cfg, and_);
4286 cmp = mono_decompose_opcode (cfg, cmp);
4287 ceq = mono_decompose_opcode (cfg, ceq);
4295 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate a delegate of KLASS
 * bound to METHOD with receiver TARGET (VIRTUAL_ selects virtual dispatch),
 * filling in the target, method, cached code slot and invoke_impl fields.
 */
4297 static G_GNUC_UNUSED MonoInst*
4298 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4302 gpointer trampoline;
4303 MonoInst *obj, *method_ins, *tramp_ins;
4307 if (virtual_ && !cfg->llvm_only) {
4308 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4311 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4315 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4319 /* Inline the contents of mono_delegate_ctor */
4321 /* Set target field */
4322 /* Optimize away setting of NULL target */
4323 if (!MONO_INS_IS_PCONST_NULL (target)) {
4324 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference store into a fresh object still needs a write barrier when enabled. */
4325 if (cfg->gen_write_barriers) {
4326 dreg = alloc_preg (cfg);
4327 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4328 emit_write_barrier (cfg, ptr, target);
4332 /* Set method field */
4333 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4334 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4337 * To avoid looking up the compiled code belonging to the target method
4338 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4339 * store it, and we fill it after the method has been compiled.
4341 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4342 MonoInst *code_slot_ins;
4345 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate (once per method, per domain) the code-slot cell under the domain lock. */
4347 domain = mono_domain_get ();
4348 mono_domain_lock (domain);
4349 if (!domain_jit_info (domain)->method_code_hash)
4350 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4351 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4353 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4354 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4356 mono_domain_unlock (domain);
4358 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4360 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only: initialize the delegate through runtime icalls instead of trampolines. */
4363 if (cfg->llvm_only) {
4364 MonoInst *args [16];
4369 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4370 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4373 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
/* AOT: the trampoline is a patchable constant keyed by (klass, method). */
4379 if (cfg->compile_aot) {
4380 MonoDelegateClassMethodPair *del_tramp;
4382 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4383 del_tramp->klass = klass;
4384 del_tramp->method = context_used ? NULL : method;
4385 del_tramp->is_virtual = virtual_;
4386 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4389 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4391 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4392 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4395 /* Set invoke_impl field */
4397 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual: copy invoke_impl and method_ptr out of the MonoDelegateTrampInfo. */
4399 dreg = alloc_preg (cfg);
4400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4401 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4403 dreg = alloc_preg (cfg);
4404 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4405 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this delegate uses virtual dispatch (single-byte flag). */
4408 dreg = alloc_preg (cfg);
4409 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4410 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4412 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank RANK with the
 * dimension arguments in SP, via the vararg mono_array_new_va () icall.
 * Varargs are incompatible with the LLVM backend, so LLVM is disabled for
 * this method.
 */
4418 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4420 MonoJitICallInfo *info;
4422 /* Need to register the icall so it gets an icall wrapper */
4423 info = mono_get_array_new_va_icall (rank);
4425 cfg->flags |= MONO_CFG_HAS_VARARGS;
4427 /* mono_array_new_va () needs a vararg calling convention */
4428 cfg->exception_message = g_strdup ("array-new");
4429 cfg->disable_llvm = TRUE;
4431 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4432 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4436 * handle_constrained_gsharedvt_call:
4438 * Handle constrained calls where the receiver is a gsharedvt type.
4439 * Return the instruction representing the call. Set the cfg exception on failure.
4442 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4443 gboolean *ref_emit_widen)
4445 MonoInst *ins = NULL;
4446 gboolean emit_widen = *ref_emit_widen;
4449 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4450 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4451 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a restricted set of shapes is supported: simple receivers/returns and at most one (possibly gsharedvt) argument. */
4453 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4454 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4455 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4456 MonoInst *args [16];
4459 * This case handles calls to
4460 * - object:ToString()/Equals()/GetHashCode(),
4461 * - System.IComparable<T>:CompareTo()
4462 * - System.IEquatable<T>:Equals ()
4463 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method (from RGCTX if it has generic context), args [2] = constrained class. */
4467 if (mono_method_check_context_used (cmethod))
4468 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4470 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4471 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4473 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4474 if (fsig->hasthis && fsig->param_count) {
4475 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4476 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4477 ins->dreg = alloc_preg (cfg);
4478 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4479 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt argument: pass its address plus a 'deref' flag derived from the runtime box type. */
4482 if (mini_is_gsharedvt_type (fsig->params [0])) {
4483 int addr_reg, deref_arg_reg;
4485 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4486 deref_arg_reg = alloc_preg (cfg);
4487 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4488 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4490 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4491 addr_reg = ins->dreg;
4492 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4494 EMIT_NEW_ICONST (cfg, args [3], 0);
4495 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4498 EMIT_NEW_ICONST (cfg, args [3], 0);
4499 EMIT_NEW_ICONST (cfg, args [4], 0);
/* Delegate the actual dispatch to the runtime icall. */
4501 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox/unwrap it to match the declared return type. */
4504 if (mini_is_gsharedvt_type (fsig->ret)) {
4505 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4506 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4510 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4511 MONO_ADD_INS (cfg->cbb, add);
4513 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4514 MONO_ADD_INS (cfg->cbb, ins);
4515 /* ins represents the call result */
/* Unsupported shape: give up on gsharedvt for this callvirt. */
4518 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4521 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block (once per method), and add a dummy use in the exit block
 * so liveness analysis keeps the variable alive for the whole method.
 */
4530 mono_emit_load_got_addr (MonoCompile *cfg)
4532 MonoInst *getaddr, *dummy_use;
4534 if (!cfg->got_var || cfg->got_var_allocated)
4537 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4538 getaddr->cil_code = cfg->header->code;
4539 getaddr->dreg = cfg->got_var->dreg;
4541 /* Add it to the start of the first bblock */
4542 if (cfg->bb_entry->code) {
4543 getaddr->next = cfg->bb_entry->code;
4544 cfg->bb_entry->code = getaddr;
4547 MONO_ADD_INS (cfg->bb_entry, getaddr);
4549 cfg->got_var_allocated = TRUE;
4552 * Add a dummy use to keep the got_var alive, since real uses might
4553 * only be generated by the back ends.
4554 * Add it to end_bblock, so the variable's lifetime covers the whole
4556 * It would be better to make the usage of the got var explicit in all
4557 * cases when the backend needs it (i.e. calls, throw etc.), so this
4558 * wouldn't be needed.
4560 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4561 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size eligible for inlining; read from the MONO_INLINELIMIT
 * environment variable on first use, else INLINE_LENGTH_LIMIT. */
4564 static int inline_limit;
4565 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects noinline/synchronized/marshalbyref methods, bodies over the size
 * limit (unless AggressiveInlining), methods whose class initializer cannot
 * be handled at this point, soft-float R4 signatures, and anything on
 * cfg->dont_inline.
 */
4568 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4570 MonoMethodHeaderSummary header;
4572 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4573 MonoMethodSignature *sig = mono_method_signature (method);
4577 if (cfg->disable_inline)
/* Cap on recursion of the inliner itself. */
4582 if (cfg->inline_depth > 10)
4585 if (!mono_method_get_header_summary (method, &header))
4588 /*runtime, icall and pinvoke are checked by summary call*/
4589 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4590 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4591 (mono_class_is_marshalbyref (method->klass)) ||
4595 /* also consider num_locals? */
4596 /* Do the size check early to avoid creating vtables */
4597 if (!inline_limit_inited) {
4599 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4600 inline_limit = atoi (inlinelimit);
4601 g_free (inlinelimit);
4603 inline_limit = INLINE_LENGTH_LIMIT;
4604 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
4606 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4610 * if we can initialize the class of the method right away, we do,
4611 * otherwise we don't allow inlining if the class needs initialization,
4612 * since it would mean inserting a call to mono_runtime_class_init()
4613 * inside the inlined code
4615 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4618 if (!(cfg->opt & MONO_OPT_SHARED)) {
4619 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4620 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4621 if (method->klass->has_cctor) {
4622 vtable = mono_class_vtable (cfg->domain, method->klass);
4625 if (!cfg->compile_aot) {
4627 if (!mono_runtime_class_init_full (vtable, &error)) {
4628 mono_error_cleanup (&error);
4633 } else if (mono_class_is_before_field_init (method->klass)) {
4634 if (cfg->run_cctors && method->klass->has_cctor) {
4635 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4636 if (!method->klass->runtime_info)
4637 /* No vtable created yet */
4639 vtable = mono_class_vtable (cfg->domain, method->klass);
4642 /* This makes so that inline cannot trigger */
4643 /* .cctors: too many apps depend on them */
4644 /* running with a specific order... */
4645 if (! vtable->initialized)
4648 if (!mono_runtime_class_init_full (vtable, &error)) {
4649 mono_error_cleanup (&error);
4653 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4654 if (!method->klass->runtime_info)
4655 /* No vtable created yet */
4657 vtable = mono_class_vtable (cfg->domain, method->klass);
4660 if (!vtable->initialized)
4665 * If we're compiling for shared code
4666 * the cctor will need to be run at aot method load time, for example,
4667 * or at the end of the compilation of the inlining method.
4669 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
/* Soft-float targets cannot inline methods with R4 in the signature. */
4673 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4674 if (mono_arch_is_soft_float ()) {
4676 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4678 for (i = 0; i < sig->param_count; ++i)
4679 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4684 if (g_list_find (cfg->dont_inline, method))
4691 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4693 if (!cfg->compile_aot) {
4695 if (vtable->initialized)
4699 if (mono_class_is_before_field_init (klass)) {
4700 if (cfg->method == method)
4704 if (!mono_class_needs_cctor_run (klass, method))
4707 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4708 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR (element type KLASS). BCHECK requests a bounds check. Returns
 * the address instruction (STACK_MP typed).
 *
 * NOTE(review): some lines (returns, #else/#endif markers, closing braces)
 * are elided in this extracted view.
 */
4715 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4719 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt variable-size element types take a separate path (size is only
 * known at runtime through the RGCTX, see below). */
4722 if (mini_is_gsharedvt_variable_klass (klass)) {
4725 mono_class_init (klass);
4726 size = mono_class_array_element_size (klass);
4729 mult_reg = alloc_preg (cfg);
4730 array_reg = arr->dreg;
4731 index_reg = index->dreg;
4733 #if SIZEOF_REGISTER == 8
4734 /* The array reg is 64 bits but the index reg is only 32 */
4735 if (COMPILE_LLVM (cfg)) {
/* LLVM backend: use the 32-bit index register as-is. */
4737 index2_reg = index_reg;
/* Otherwise sign-extend the 32-bit index to pointer width. */
4739 index2_reg = alloc_preg (cfg);
4740 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets: narrow an I8 index down to I4. */
4743 if (index->type == STACK_I8) {
4744 index2_reg = alloc_preg (cfg);
4745 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4747 index2_reg = index_reg;
4752 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4754 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: power-of-two element sizes map to a single LEA with scale. */
4755 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* fast_log2[size] gives the LEA shift amount (index 0 is unused). */
4756 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4758 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4759 ins->klass = mono_class_get_element_class (klass);
4760 ins->type = STACK_MP;
4766 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt path: the element size comes from the RGCTX at runtime. */
4769 MonoInst *rgctx_ins;
4772 g_assert (cfg->gshared);
4773 context_used = mini_class_check_context_used (cfg, klass);
4774 g_assert (context_used);
4775 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4776 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
/* Fixed-size path: multiply index by the compile-time element size. */
4778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = array + index*size + offsetof(MonoArray, vector) */
4780 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4781 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4782 ins->klass = mono_class_get_element_class (klass);
4783 ins->type = STACK_MP;
4784 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a rank-2 (two-dimensional)
 * array, including per-dimension lower-bound adjustment and range checks
 * against the MonoArrayBounds entries. Returns the address instruction.
 */
4790 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4792 int bounds_reg = alloc_preg (cfg);
4793 int add_reg = alloc_ireg_mp (cfg);
4794 int mult_reg = alloc_preg (cfg);
4795 int mult2_reg = alloc_preg (cfg);
4796 int low1_reg = alloc_preg (cfg);
4797 int low2_reg = alloc_preg (cfg);
4798 int high1_reg = alloc_preg (cfg);
4799 int high2_reg = alloc_preg (cfg);
4800 int realidx1_reg = alloc_preg (cfg);
4801 int realidx2_reg = alloc_preg (cfg);
4802 int sum_reg = alloc_preg (cfg);
4803 int index1, index2, tmpreg;
4807 mono_class_init (klass);
4808 size = mono_class_array_element_size (klass);
4810 index1 = index_ins1->dreg;
4811 index2 = index_ins2->dreg;
4813 #if SIZEOF_REGISTER == 8
4814 /* The array reg is 64 bits but the index reg is only 32 */
4815 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes to pointer width on 64-bit targets. */
4818 tmpreg = alloc_preg (cfg);
4819 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4821 tmpreg = alloc_preg (cfg);
4822 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4826 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4830 /* range checking */
/* Load the bounds descriptor array; entry [0] covers dim 1, entry [1] dim 2. */
4831 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4832 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, then unsigned-compare against length. */
4834 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4835 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4836 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4837 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4838 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4839 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4840 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry
 * (offset by sizeof (MonoArrayBounds)). */
4842 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4843 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4844 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4845 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4846 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4847 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4848 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * dim2_length + realidx2) * size) + vector offset */
4850 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4851 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4853 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4854 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4856 ins->type = STACK_MP;
4858 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for CMETHOD (an Array Get/Set/Address
 * accessor): rank 1 and rank 2 get inlined fast paths, everything else calls
 * the generic marshalling helper. IS_SET excludes the trailing value argument
 * from the rank count.
 */
4864 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4868 MonoMethod *addr_method;
4870 MonoClass *eclass = cmethod->klass->element_class;
/* For setters the last parameter is the stored value, not an index. */
4872 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Rank 1: inline single-dimension address computation with bounds check. */
4875 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4877 /* emit_ldelema_2 depends on OP_LMUL */
4878 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4879 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* gsharedvt variable-size elements cannot use the generic helper path here. */
4882 if (mini_is_gsharedvt_variable_klass (eclass))
/* Fallback: call the runtime-generated address helper for this rank/size. */
4885 element_size = mono_class_array_element_size (eclass);
4886 addr_method = mono_marshal_get_array_address (rank, element_size);
4887 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
4892 static MonoBreakPolicy
4893 always_insert_breakpoint (MonoMethod *method)
4895 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4898 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4901 * mono_set_break_policy:
4902 * policy_callback: the new callback function
4904 * Allow embedders to decide whether to actually obey breakpoint instructions
4905 * (both break IL instructions and Debugger.Break () method calls), for example
4906 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4907 * untrusted or semi-trusted code.
4909 * @policy_callback will be called every time a break point instruction needs to
4910 * be inserted with the method argument being the method that calls Debugger.Break()
4911 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4912 * if it wants the breakpoint to not be effective in the given method.
4913 * #MONO_BREAK_POLICY_ALWAYS is the default.
4916 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default (always insert). */
4918 if (policy_callback)
4919 break_policy_func = policy_callback;
4921 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the embedder-installed break policy for METHOD and return
 * whether a breakpoint should actually be emitted.
 * NOTE(review): the identifier is misspelled ("brekpoint") but this is the
 * long-standing name used by callers throughout the file — do not rename.
 */
4925 should_insert_brekpoint (MonoMethod *method) {
4926 switch (break_policy_func (method)) {
4927 case MONO_BREAK_POLICY_ALWAYS:
4929 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG used to depend on mdb; now just warns (mdb support removed). */
4931 case MONO_BREAK_POLICY_ON_DBG:
4932 g_warning ("mdb no longer supported");
/* Any other value is a contract violation by the embedder's callback. */
4935 g_warning ("Incorrect value returned from break policy callback");
4940 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.GetGenericValueImpl (is_set == FALSE) or SetGenericValueImpl
 * (is_set == TRUE): compute the element address, then copy between the element
 * slot and the caller-provided location in args [2]. The element type comes
 * from fsig's third parameter.
 */
4942 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4944 MonoInst *addr, *store, *load;
4945 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4947 /* the bounds check is already done by the callers */
4948 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: load the value from args [2] and store it into the array element;
 * reference elements additionally need a GC write barrier. */
4950 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4951 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4952 if (mini_type_is_reference (&eklass->byval_arg))
4953 emit_write_barrier (cfg, addr, load);
/* Get: load the element and store it through the args [2] pointer. */
4955 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4956 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type (thin wrapper over mini_type_is_reference). */
4963 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4965 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp [2] into element sp [1] of array sp [0].
 * SAFETY_CHECKS enables bounds/covariance checking; reference-type stores of
 * possibly non-null values go through the virtual stelemref helper which
 * performs the array covariance check.
 */
4969 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4971 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4972 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4973 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4974 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4975 MonoInst *iargs [3];
4978 mono_class_setup_vtable (obj_array);
4979 g_assert (helper->slot);
4981 if (sp [0]->type != STACK_OBJ)
4983 if (sp [2]->type != STACK_OBJ)
/* Virtual call: the helper's implementation is chosen by the runtime
 * per actual array type, so pass the array as the 'this' argument. */
4990 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt variable-size elements: compute the address and do a
 * variable-size valuetype store. */
4994 if (mini_is_gsharedvt_variable_klass (klass)) {
4997 // FIXME-VT: OP_ICONST optimization
4998 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4999 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5000 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset at compile time and store directly. */
5001 } else if (sp [1]->opcode == OP_ICONST) {
5002 int array_reg = sp [0]->dreg;
5003 int index_reg = sp [1]->dreg;
5004 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
/* LLVM on 64-bit: widen the 32-bit index before the bounds check. */
5006 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5007 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5010 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5011 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute address, store, and emit a write barrier for
 * reference-typed elements. */
5013 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5014 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5015 if (generic_class_is_reference_type (cfg, klass))
5016 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore (is_set) / Array.UnsafeLoad: element access with
 * NO bounds or covariance checks. The element type is taken from the value
 * parameter for stores and from the return type for loads.
 */
5023 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5028 eklass = mono_class_from_mono_type (fsig->params [2]);
5030 eklass = mono_class_from_mono_type (fsig->ret);
/* Store path reuses emit_array_store with safety checks disabled. */
5033 return emit_array_store (cfg, eklass, args, FALSE);
5035 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5036 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5042 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5045 int param_size, return_size;
5047 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5048 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5050 if (cfg->verbose_level > 3)
5051 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5053 //Don't allow mixing reference types with value types
5054 if (param_klass->valuetype != return_klass->valuetype) {
5055 if (cfg->verbose_level > 3)
5056 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5060 if (!param_klass->valuetype) {
5061 if (cfg->verbose_level > 3)
5062 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5067 if (param_klass->has_references || return_klass->has_references)
5070 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5071 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5072 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5073 if (cfg->verbose_level > 3)
5074 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5078 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5079 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5080 if (cfg->verbose_level > 3)
5081 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5085 param_size = mono_class_value_size (param_klass, &align);
5086 return_size = mono_class_value_size (return_klass, &align);
5088 //We can do it if sizes match
5089 if (param_size == return_size) {
5090 if (cfg->verbose_level > 3)
5091 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5095 //No simple way to handle struct if sizes don't match
5096 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5097 if (cfg->verbose_level > 3)
5098 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5103 * Same reg size category.
5104 * A quick note on why we don't require widening here.
5105 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5107 * Since the source value comes from a function argument, the JIT will already have
5108 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5110 if (param_size <= 4 && return_size <= 4) {
5111 if (cfg->verbose_level > 3)
5112 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov<S,R> when a plain register move is provably safe
 * (see is_unsafe_mov_compatible); also covers rank-1 arrays of compatible
 * element types. gsharedvt return types are excluded.
 */
5120 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5122 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5123 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5125 if (mini_is_gsharedvt_variable_type (fsig->ret))
5128 //Valuetypes that are semantically equivalent or numbers than can be widened to
5129 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5132 //Arrays of valuetypes that are semantically equivalent
5133 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic: SIMD ctors when
 * MONO_OPT_SIMD is enabled, otherwise native-types intrinsics. Returns the
 * emitted instruction or falls through to the native-types path.
 */
5140 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5142 #ifdef MONO_ARCH_SIMD_INTRINSICS
5143 MonoInst *ins = NULL;
5145 if (cfg->opt & MONO_OPT_SIMD) {
5146 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5152 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND
 * (a MONO_MEMORY_BARRIER_* constant) to the current basic block.
 */
5156 emit_memory_barrier (MonoCompile *cfg, int kind)
5158 MonoInst *ins = NULL;
5159 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5160 MONO_ADD_INS (cfg->cbb, ins);
5161 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   LLVM-backend-specific intrinsics: System.Math Sin/Cos/Sqrt/Abs(double)
 * map to dedicated FP opcodes, and Min/Max map to CMOV-based integer min/max
 * opcodes when MONO_OPT_CMOV is enabled. Returns the emitted instruction or
 * NULL-equivalent fallthrough for unrecognized methods.
 */
5167 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5169 MonoInst *ins = NULL;
5172 /* The LLVM backend supports these intrinsics */
5173 if (cmethod->klass == mono_defaults.math_class) {
5174 if (strcmp (cmethod->name, "Sin") == 0) {
5176 } else if (strcmp (cmethod->name, "Cos") == 0) {
5178 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for the double overload. */
5180 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one FP argument, R8 result. */
5184 if (opcode && fsig->param_count == 1) {
5185 MONO_INST_NEW (cfg, ins, opcode);
5186 ins->type = STACK_R8;
5187 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5188 ins->sreg1 = args [0]->dreg;
5189 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max: pick the signed/unsigned 32/64-bit min/max opcode. */
5193 if (cfg->opt & MONO_OPT_CMOV) {
5194 if (strcmp (cmethod->name, "Min") == 0) {
5195 if (fsig->params [0]->type == MONO_TYPE_I4)
5197 if (fsig->params [0]->type == MONO_TYPE_U4)
5198 opcode = OP_IMIN_UN;
5199 else if (fsig->params [0]->type == MONO_TYPE_I8)
5201 else if (fsig->params [0]->type == MONO_TYPE_U8)
5202 opcode = OP_LMIN_UN;
5203 } else if (strcmp (cmethod->name, "Max") == 0) {
5204 if (fsig->params [0]->type == MONO_TYPE_I4)
5206 if (fsig->params [0]->type == MONO_TYPE_U4)
5207 opcode = OP_IMAX_UN;
5208 else if (fsig->params [0]->type == MONO_TYPE_I8)
5210 else if (fsig->params [0]->type == MONO_TYPE_U8)
5211 opcode = OP_LMAX_UN;
/* Binary min/max intrinsic: stack type follows the operand width. */
5215 if (opcode && fsig->param_count == 2) {
5216 MONO_INST_NEW (cfg, ins, opcode);
5217 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5218 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5219 ins->sreg1 = args [0]->dreg;
5220 ins->sreg2 = args [1]->dreg;
5221 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to use in shared (generic-sharing) code:
 * currently the System.Array UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 */
5229 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5231 if (cmethod->klass == mono_defaults.array_class) {
5232 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5233 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5234 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5235 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5236 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5237 return emit_array_unsafe_mov (cfg, fsig, args);
5244 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5246 MonoInst *ins = NULL;
5247 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5249 if (cmethod->klass == mono_defaults.string_class) {
5250 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5251 int dreg = alloc_ireg (cfg);
5252 int index_reg = alloc_preg (cfg);
5253 int add_reg = alloc_preg (cfg);
5255 #if SIZEOF_REGISTER == 8
5256 if (COMPILE_LLVM (cfg)) {
5257 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5259 /* The array reg is 64 bits but the index reg is only 32 */
5260 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5263 index_reg = args [1]->dreg;
5265 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5267 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5268 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5269 add_reg = ins->dreg;
5270 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5273 int mult_reg = alloc_preg (cfg);
5274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5275 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5276 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5277 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5279 type_from_op (cfg, ins, NULL, NULL);
5281 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5282 int dreg = alloc_ireg (cfg);
5283 /* Decompose later to allow more optimizations */
5284 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5285 ins->type = STACK_I4;
5286 ins->flags |= MONO_INST_FAULT;
5287 cfg->cbb->has_array_access = TRUE;
5288 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5293 } else if (cmethod->klass == mono_defaults.object_class) {
5294 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5295 int dreg = alloc_ireg_ref (cfg);
5296 int vt_reg = alloc_preg (cfg);
5297 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5298 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5299 type_from_op (cfg, ins, NULL, NULL);
5302 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5303 int dreg = alloc_ireg (cfg);
5304 int t1 = alloc_ireg (cfg);
5306 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5307 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5308 ins->type = STACK_I4;
5311 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5312 MONO_INST_NEW (cfg, ins, OP_NOP);
5313 MONO_ADD_INS (cfg->cbb, ins);
5317 } else if (cmethod->klass == mono_defaults.array_class) {
5318 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5319 return emit_array_generic_access (cfg, fsig, args, FALSE);
5320 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5321 return emit_array_generic_access (cfg, fsig, args, TRUE);
5323 #ifndef MONO_BIG_ARRAYS
5325 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5328 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5329 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5330 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5331 int dreg = alloc_ireg (cfg);
5332 int bounds_reg = alloc_ireg_mp (cfg);
5333 MonoBasicBlock *end_bb, *szarray_bb;
5334 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5336 NEW_BBLOCK (cfg, end_bb);
5337 NEW_BBLOCK (cfg, szarray_bb);
5339 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5340 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5341 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5342 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5343 /* Non-szarray case */
5345 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5346 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5348 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5349 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5351 MONO_START_BB (cfg, szarray_bb);
5354 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5355 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5357 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5358 MONO_START_BB (cfg, end_bb);
5360 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5361 ins->type = STACK_I4;
5367 if (cmethod->name [0] != 'g')
5370 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5371 int dreg = alloc_ireg (cfg);
5372 int vtable_reg = alloc_preg (cfg);
5373 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5374 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5375 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5376 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5377 type_from_op (cfg, ins, NULL, NULL);
5380 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5381 int dreg = alloc_ireg (cfg);
5383 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5384 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5385 type_from_op (cfg, ins, NULL, NULL);
5390 } else if (cmethod->klass == runtime_helpers_class) {
5391 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5392 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5394 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5395 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5397 g_assert (ctx->method_inst);
5398 g_assert (ctx->method_inst->type_argc == 1);
5399 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5400 MonoClass *klass = mono_class_from_mono_type (t);
5404 mono_class_init (klass);
5405 if (MONO_TYPE_IS_REFERENCE (t))
5406 EMIT_NEW_ICONST (cfg, ins, 1);
5407 else if (MONO_TYPE_IS_PRIMITIVE (t))
5408 EMIT_NEW_ICONST (cfg, ins, 0);
5409 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5410 EMIT_NEW_ICONST (cfg, ins, 1);
5411 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5412 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5414 g_assert (cfg->gshared);
5416 int context_used = mini_class_check_context_used (cfg, klass);
5418 /* This returns 1 or 2 */
5419 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5420 int dreg = alloc_ireg (cfg);
5421 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5427 } else if (cmethod->klass == mono_defaults.monitor_class) {
5428 gboolean is_enter = FALSE;
5429 gboolean is_v4 = FALSE;
5431 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5435 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5440 * To make async stack traces work, icalls which can block should have a wrapper.
5441 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5443 MonoBasicBlock *end_bb;
5445 NEW_BBLOCK (cfg, end_bb);
5447 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5448 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5449 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5450 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5451 MONO_START_BB (cfg, end_bb);
5454 } else if (cmethod->klass == mono_defaults.thread_class) {
5455 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5456 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5457 MONO_ADD_INS (cfg->cbb, ins);
5459 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5460 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5461 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5463 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5465 if (fsig->params [0]->type == MONO_TYPE_I1)
5466 opcode = OP_LOADI1_MEMBASE;
5467 else if (fsig->params [0]->type == MONO_TYPE_U1)
5468 opcode = OP_LOADU1_MEMBASE;
5469 else if (fsig->params [0]->type == MONO_TYPE_I2)
5470 opcode = OP_LOADI2_MEMBASE;
5471 else if (fsig->params [0]->type == MONO_TYPE_U2)
5472 opcode = OP_LOADU2_MEMBASE;
5473 else if (fsig->params [0]->type == MONO_TYPE_I4)
5474 opcode = OP_LOADI4_MEMBASE;
5475 else if (fsig->params [0]->type == MONO_TYPE_U4)
5476 opcode = OP_LOADU4_MEMBASE;
5477 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5478 opcode = OP_LOADI8_MEMBASE;
5479 else if (fsig->params [0]->type == MONO_TYPE_R4)
5480 opcode = OP_LOADR4_MEMBASE;
5481 else if (fsig->params [0]->type == MONO_TYPE_R8)
5482 opcode = OP_LOADR8_MEMBASE;
5483 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5484 opcode = OP_LOAD_MEMBASE;
5487 MONO_INST_NEW (cfg, ins, opcode);
5488 ins->inst_basereg = args [0]->dreg;
5489 ins->inst_offset = 0;
5490 MONO_ADD_INS (cfg->cbb, ins);
5492 switch (fsig->params [0]->type) {
5499 ins->dreg = mono_alloc_ireg (cfg);
5500 ins->type = STACK_I4;
5504 ins->dreg = mono_alloc_lreg (cfg);
5505 ins->type = STACK_I8;
5509 ins->dreg = mono_alloc_ireg (cfg);
5510 #if SIZEOF_REGISTER == 8
5511 ins->type = STACK_I8;
5513 ins->type = STACK_I4;
5518 ins->dreg = mono_alloc_freg (cfg);
5519 ins->type = STACK_R8;
5522 g_assert (mini_type_is_reference (fsig->params [0]));
5523 ins->dreg = mono_alloc_ireg_ref (cfg);
5524 ins->type = STACK_OBJ;
5528 if (opcode == OP_LOADI8_MEMBASE)
5529 ins = mono_decompose_opcode (cfg, ins);
5531 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5535 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5537 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5539 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5540 opcode = OP_STOREI1_MEMBASE_REG;
5541 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5542 opcode = OP_STOREI2_MEMBASE_REG;
5543 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5544 opcode = OP_STOREI4_MEMBASE_REG;
5545 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5546 opcode = OP_STOREI8_MEMBASE_REG;
5547 else if (fsig->params [0]->type == MONO_TYPE_R4)
5548 opcode = OP_STORER4_MEMBASE_REG;
5549 else if (fsig->params [0]->type == MONO_TYPE_R8)
5550 opcode = OP_STORER8_MEMBASE_REG;
5551 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5552 opcode = OP_STORE_MEMBASE_REG;
5555 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5557 MONO_INST_NEW (cfg, ins, opcode);
5558 ins->sreg1 = args [1]->dreg;
5559 ins->inst_destbasereg = args [0]->dreg;
5560 ins->inst_offset = 0;
5561 MONO_ADD_INS (cfg->cbb, ins);
5563 if (opcode == OP_STOREI8_MEMBASE_REG)
5564 ins = mono_decompose_opcode (cfg, ins);
5569 } else if (cmethod->klass->image == mono_defaults.corlib &&
5570 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5571 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5574 #if SIZEOF_REGISTER == 8
5575 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5576 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5577 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5578 ins->dreg = mono_alloc_preg (cfg);
5579 ins->sreg1 = args [0]->dreg;
5580 ins->type = STACK_I8;
5581 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5582 MONO_ADD_INS (cfg->cbb, ins);
5586 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5588 /* 64 bit reads are already atomic */
5589 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5590 load_ins->dreg = mono_alloc_preg (cfg);
5591 load_ins->inst_basereg = args [0]->dreg;
5592 load_ins->inst_offset = 0;
5593 load_ins->type = STACK_I8;
5594 MONO_ADD_INS (cfg->cbb, load_ins);
5596 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5603 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5604 MonoInst *ins_iconst;
5607 if (fsig->params [0]->type == MONO_TYPE_I4) {
5608 opcode = OP_ATOMIC_ADD_I4;
5609 cfg->has_atomic_add_i4 = TRUE;
5611 #if SIZEOF_REGISTER == 8
5612 else if (fsig->params [0]->type == MONO_TYPE_I8)
5613 opcode = OP_ATOMIC_ADD_I8;
5616 if (!mono_arch_opcode_supported (opcode))
5618 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5619 ins_iconst->inst_c0 = 1;
5620 ins_iconst->dreg = mono_alloc_ireg (cfg);
5621 MONO_ADD_INS (cfg->cbb, ins_iconst);
5623 MONO_INST_NEW (cfg, ins, opcode);
5624 ins->dreg = mono_alloc_ireg (cfg);
5625 ins->inst_basereg = args [0]->dreg;
5626 ins->inst_offset = 0;
5627 ins->sreg2 = ins_iconst->dreg;
5628 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5629 MONO_ADD_INS (cfg->cbb, ins);
5631 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5632 MonoInst *ins_iconst;
5635 if (fsig->params [0]->type == MONO_TYPE_I4) {
5636 opcode = OP_ATOMIC_ADD_I4;
5637 cfg->has_atomic_add_i4 = TRUE;
5639 #if SIZEOF_REGISTER == 8
5640 else if (fsig->params [0]->type == MONO_TYPE_I8)
5641 opcode = OP_ATOMIC_ADD_I8;
5644 if (!mono_arch_opcode_supported (opcode))
5646 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5647 ins_iconst->inst_c0 = -1;
5648 ins_iconst->dreg = mono_alloc_ireg (cfg);
5649 MONO_ADD_INS (cfg->cbb, ins_iconst);
5651 MONO_INST_NEW (cfg, ins, opcode);
5652 ins->dreg = mono_alloc_ireg (cfg);
5653 ins->inst_basereg = args [0]->dreg;
5654 ins->inst_offset = 0;
5655 ins->sreg2 = ins_iconst->dreg;
5656 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5657 MONO_ADD_INS (cfg->cbb, ins);
5659 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5662 if (fsig->params [0]->type == MONO_TYPE_I4) {
5663 opcode = OP_ATOMIC_ADD_I4;
5664 cfg->has_atomic_add_i4 = TRUE;
5666 #if SIZEOF_REGISTER == 8
5667 else if (fsig->params [0]->type == MONO_TYPE_I8)
5668 opcode = OP_ATOMIC_ADD_I8;
5671 if (!mono_arch_opcode_supported (opcode))
5673 MONO_INST_NEW (cfg, ins, opcode);
5674 ins->dreg = mono_alloc_ireg (cfg);
5675 ins->inst_basereg = args [0]->dreg;
5676 ins->inst_offset = 0;
5677 ins->sreg2 = args [1]->dreg;
5678 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5679 MONO_ADD_INS (cfg->cbb, ins);
5682 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5683 MonoInst *f2i = NULL, *i2f;
5684 guint32 opcode, f2i_opcode, i2f_opcode;
5685 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5686 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5688 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5689 fsig->params [0]->type == MONO_TYPE_R4) {
5690 opcode = OP_ATOMIC_EXCHANGE_I4;
5691 f2i_opcode = OP_MOVE_F_TO_I4;
5692 i2f_opcode = OP_MOVE_I4_TO_F;
5693 cfg->has_atomic_exchange_i4 = TRUE;
5695 #if SIZEOF_REGISTER == 8
5697 fsig->params [0]->type == MONO_TYPE_I8 ||
5698 fsig->params [0]->type == MONO_TYPE_R8 ||
5699 fsig->params [0]->type == MONO_TYPE_I) {
5700 opcode = OP_ATOMIC_EXCHANGE_I8;
5701 f2i_opcode = OP_MOVE_F_TO_I8;
5702 i2f_opcode = OP_MOVE_I8_TO_F;
5705 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5706 opcode = OP_ATOMIC_EXCHANGE_I4;
5707 cfg->has_atomic_exchange_i4 = TRUE;
5713 if (!mono_arch_opcode_supported (opcode))
5717 /* TODO: Decompose these opcodes instead of bailing here. */
5718 if (COMPILE_SOFT_FLOAT (cfg))
5721 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5722 f2i->dreg = mono_alloc_ireg (cfg);
5723 f2i->sreg1 = args [1]->dreg;
5724 if (f2i_opcode == OP_MOVE_F_TO_I4)
5725 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5726 MONO_ADD_INS (cfg->cbb, f2i);
5729 MONO_INST_NEW (cfg, ins, opcode);
5730 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5731 ins->inst_basereg = args [0]->dreg;
5732 ins->inst_offset = 0;
5733 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5734 MONO_ADD_INS (cfg->cbb, ins);
5736 switch (fsig->params [0]->type) {
5738 ins->type = STACK_I4;
5741 ins->type = STACK_I8;
5744 #if SIZEOF_REGISTER == 8
5745 ins->type = STACK_I8;
5747 ins->type = STACK_I4;
5752 ins->type = STACK_R8;
5755 g_assert (mini_type_is_reference (fsig->params [0]));
5756 ins->type = STACK_OBJ;
5761 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5762 i2f->dreg = mono_alloc_freg (cfg);
5763 i2f->sreg1 = ins->dreg;
5764 i2f->type = STACK_R8;
5765 if (i2f_opcode == OP_MOVE_I4_TO_F)
5766 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5767 MONO_ADD_INS (cfg->cbb, i2f);
5772 if (cfg->gen_write_barriers && is_ref)
5773 emit_write_barrier (cfg, args [0], args [1]);
5775 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5776 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5777 guint32 opcode, f2i_opcode, i2f_opcode;
5778 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5779 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5781 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5782 fsig->params [1]->type == MONO_TYPE_R4) {
5783 opcode = OP_ATOMIC_CAS_I4;
5784 f2i_opcode = OP_MOVE_F_TO_I4;
5785 i2f_opcode = OP_MOVE_I4_TO_F;
5786 cfg->has_atomic_cas_i4 = TRUE;
5788 #if SIZEOF_REGISTER == 8
5790 fsig->params [1]->type == MONO_TYPE_I8 ||
5791 fsig->params [1]->type == MONO_TYPE_R8 ||
5792 fsig->params [1]->type == MONO_TYPE_I) {
5793 opcode = OP_ATOMIC_CAS_I8;
5794 f2i_opcode = OP_MOVE_F_TO_I8;
5795 i2f_opcode = OP_MOVE_I8_TO_F;
5798 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5799 opcode = OP_ATOMIC_CAS_I4;
5800 cfg->has_atomic_cas_i4 = TRUE;
5806 if (!mono_arch_opcode_supported (opcode))
5810 /* TODO: Decompose these opcodes instead of bailing here. */
5811 if (COMPILE_SOFT_FLOAT (cfg))
5814 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5815 f2i_new->dreg = mono_alloc_ireg (cfg);
5816 f2i_new->sreg1 = args [1]->dreg;
5817 if (f2i_opcode == OP_MOVE_F_TO_I4)
5818 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5819 MONO_ADD_INS (cfg->cbb, f2i_new);
5821 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5822 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5823 f2i_cmp->sreg1 = args [2]->dreg;
5824 if (f2i_opcode == OP_MOVE_F_TO_I4)
5825 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5826 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5829 MONO_INST_NEW (cfg, ins, opcode);
5830 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5831 ins->sreg1 = args [0]->dreg;
5832 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5833 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5834 MONO_ADD_INS (cfg->cbb, ins);
5836 switch (fsig->params [1]->type) {
5838 ins->type = STACK_I4;
5841 ins->type = STACK_I8;
5844 #if SIZEOF_REGISTER == 8
5845 ins->type = STACK_I8;
5847 ins->type = STACK_I4;
5851 ins->type = cfg->r4_stack_type;
5854 ins->type = STACK_R8;
5857 g_assert (mini_type_is_reference (fsig->params [1]));
5858 ins->type = STACK_OBJ;
5863 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5864 i2f->dreg = mono_alloc_freg (cfg);
5865 i2f->sreg1 = ins->dreg;
5866 i2f->type = STACK_R8;
5867 if (i2f_opcode == OP_MOVE_I4_TO_F)
5868 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5869 MONO_ADD_INS (cfg->cbb, i2f);
5874 if (cfg->gen_write_barriers && is_ref)
5875 emit_write_barrier (cfg, args [0], args [1]);
5877 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5878 fsig->params [1]->type == MONO_TYPE_I4) {
5879 MonoInst *cmp, *ceq;
5881 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5884 /* int32 r = CAS (location, value, comparand); */
5885 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5886 ins->dreg = alloc_ireg (cfg);
5887 ins->sreg1 = args [0]->dreg;
5888 ins->sreg2 = args [1]->dreg;
5889 ins->sreg3 = args [2]->dreg;
5890 ins->type = STACK_I4;
5891 MONO_ADD_INS (cfg->cbb, ins);
5893 /* bool result = r == comparand; */
5894 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5895 cmp->sreg1 = ins->dreg;
5896 cmp->sreg2 = args [2]->dreg;
5897 cmp->type = STACK_I4;
5898 MONO_ADD_INS (cfg->cbb, cmp);
5900 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5901 ceq->dreg = alloc_ireg (cfg);
5902 ceq->type = STACK_I4;
5903 MONO_ADD_INS (cfg->cbb, ceq);
5905 /* *success = result; */
5906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5908 cfg->has_atomic_cas_i4 = TRUE;
5910 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5911 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5915 } else if (cmethod->klass->image == mono_defaults.corlib &&
5916 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5917 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5920 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5922 MonoType *t = fsig->params [0];
5924 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5926 g_assert (t->byref);
5927 /* t is a byref type, so the reference check is more complicated */
5928 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5929 if (t->type == MONO_TYPE_I1)
5930 opcode = OP_ATOMIC_LOAD_I1;
5931 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5932 opcode = OP_ATOMIC_LOAD_U1;
5933 else if (t->type == MONO_TYPE_I2)
5934 opcode = OP_ATOMIC_LOAD_I2;
5935 else if (t->type == MONO_TYPE_U2)
5936 opcode = OP_ATOMIC_LOAD_U2;
5937 else if (t->type == MONO_TYPE_I4)
5938 opcode = OP_ATOMIC_LOAD_I4;
5939 else if (t->type == MONO_TYPE_U4)
5940 opcode = OP_ATOMIC_LOAD_U4;
5941 else if (t->type == MONO_TYPE_R4)
5942 opcode = OP_ATOMIC_LOAD_R4;
5943 else if (t->type == MONO_TYPE_R8)
5944 opcode = OP_ATOMIC_LOAD_R8;
5945 #if SIZEOF_REGISTER == 8
5946 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5947 opcode = OP_ATOMIC_LOAD_I8;
5948 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5949 opcode = OP_ATOMIC_LOAD_U8;
5951 else if (t->type == MONO_TYPE_I)
5952 opcode = OP_ATOMIC_LOAD_I4;
5953 else if (is_ref || t->type == MONO_TYPE_U)
5954 opcode = OP_ATOMIC_LOAD_U4;
5958 if (!mono_arch_opcode_supported (opcode))
5961 MONO_INST_NEW (cfg, ins, opcode);
5962 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5963 ins->sreg1 = args [0]->dreg;
5964 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5965 MONO_ADD_INS (cfg->cbb, ins);
5968 case MONO_TYPE_BOOLEAN:
5975 ins->type = STACK_I4;
5979 ins->type = STACK_I8;
5983 #if SIZEOF_REGISTER == 8
5984 ins->type = STACK_I8;
5986 ins->type = STACK_I4;
5990 ins->type = cfg->r4_stack_type;
5993 ins->type = STACK_R8;
5997 ins->type = STACK_OBJ;
6003 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6005 MonoType *t = fsig->params [0];
6008 g_assert (t->byref);
6009 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6010 if (t->type == MONO_TYPE_I1)
6011 opcode = OP_ATOMIC_STORE_I1;
6012 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6013 opcode = OP_ATOMIC_STORE_U1;
6014 else if (t->type == MONO_TYPE_I2)
6015 opcode = OP_ATOMIC_STORE_I2;
6016 else if (t->type == MONO_TYPE_U2)
6017 opcode = OP_ATOMIC_STORE_U2;
6018 else if (t->type == MONO_TYPE_I4)
6019 opcode = OP_ATOMIC_STORE_I4;
6020 else if (t->type == MONO_TYPE_U4)
6021 opcode = OP_ATOMIC_STORE_U4;
6022 else if (t->type == MONO_TYPE_R4)
6023 opcode = OP_ATOMIC_STORE_R4;
6024 else if (t->type == MONO_TYPE_R8)
6025 opcode = OP_ATOMIC_STORE_R8;
6026 #if SIZEOF_REGISTER == 8
6027 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6028 opcode = OP_ATOMIC_STORE_I8;
6029 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6030 opcode = OP_ATOMIC_STORE_U8;
6032 else if (t->type == MONO_TYPE_I)
6033 opcode = OP_ATOMIC_STORE_I4;
6034 else if (is_ref || t->type == MONO_TYPE_U)
6035 opcode = OP_ATOMIC_STORE_U4;
6039 if (!mono_arch_opcode_supported (opcode))
6042 MONO_INST_NEW (cfg, ins, opcode);
6043 ins->dreg = args [0]->dreg;
6044 ins->sreg1 = args [1]->dreg;
6045 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6046 MONO_ADD_INS (cfg->cbb, ins);
6048 if (cfg->gen_write_barriers && is_ref)
6049 emit_write_barrier (cfg, args [0], args [1]);
6055 } else if (cmethod->klass->image == mono_defaults.corlib &&
6056 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6057 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6058 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6059 if (should_insert_brekpoint (cfg->method)) {
6060 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6062 MONO_INST_NEW (cfg, ins, OP_NOP);
6063 MONO_ADD_INS (cfg->cbb, ins);
6067 } else if (cmethod->klass->image == mono_defaults.corlib &&
6068 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6069 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6070 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6072 EMIT_NEW_ICONST (cfg, ins, 1);
6074 EMIT_NEW_ICONST (cfg, ins, 0);
6077 } else if (cmethod->klass->image == mono_defaults.corlib &&
6078 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6079 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6080 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6081 /* No stack walks are currently available, so implement this as an intrinsic */
6082 MonoInst *assembly_ins;
6084 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6085 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6088 } else if (cmethod->klass->image == mono_defaults.corlib &&
6089 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6090 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6091 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6092 /* No stack walks are currently available, so implement this as an intrinsic */
6093 MonoInst *method_ins;
6094 MonoMethod *declaring = cfg->method;
6096 /* This returns the declaring generic method */
6097 if (declaring->is_inflated)
6098 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6099 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6100 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6101 cfg->no_inline = TRUE;
6102 if (cfg->method != cfg->current_method)
6103 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6106 } else if (cmethod->klass == mono_defaults.math_class) {
6108 * There is general branchless code for Min/Max, but it does not work for
6110 * http://everything2.com/?node_id=1051618
6112 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
6113 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
6114 MONO_INST_NEW (cfg, ins, OP_PCEQ);
6115 ins->dreg = alloc_preg (cfg);
6116 ins->type = STACK_I4;
6117 MONO_ADD_INS (cfg->cbb, ins);
6119 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6120 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6121 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6122 !strcmp (cmethod->klass->name, "Selector")) ||
6123 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6124 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6125 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6126 !strcmp (cmethod->klass->name, "Selector"))
6128 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6129 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6130 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6133 MonoJumpInfoToken *ji;
6136 if (args [0]->opcode == OP_GOT_ENTRY) {
6137 pi = (MonoInst *)args [0]->inst_p1;
6138 g_assert (pi->opcode == OP_PATCH_INFO);
6139 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6140 ji = (MonoJumpInfoToken *)pi->inst_p0;
6142 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6143 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6146 NULLIFY_INS (args [0]);
6148 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6149 return_val_if_nok (&cfg->error, NULL);
6151 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6152 ins->dreg = mono_alloc_ireg (cfg);
6155 MONO_ADD_INS (cfg->cbb, ins);
6160 #ifdef MONO_ARCH_SIMD_INTRINSICS
6161 if (cfg->opt & MONO_OPT_SIMD) {
6162 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6168 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6172 if (COMPILE_LLVM (cfg)) {
6173 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6178 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6182 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a faster managed replacement where one
 * exists.  Currently only handles String:InternalAllocateStr, replacing it
 * with the GC's managed allocator, and only when allocation profiling and
 * shared (MONO_OPT_SHARED) code are disabled.
 * Returns the replacement call instruction, or NULL when no redirection applies.
 */
inline static MonoInst*
mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
					MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
	if (method->klass == mono_defaults.string_class) {
		/* managed string allocation support */
		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
			MonoInst *iargs [2];
			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
			MonoMethod *managed_alloc = NULL;

			g_assert (vtable); /* Should not fail since it is System.String */
#ifndef MONO_CROSS_COMPILE
			managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
			/* Call the managed allocator with (vtable, length) */
			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
			iargs [1] = args [0];
			return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Save the arguments of an inlined call into newly created local
 * variables so the inlined body can address them through cfg->args [i].
 * SP points at the argument instructions on the evaluation stack; the
 * 'this' argument, when present, occupies slot 0.
 */
mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
	MonoInst *store, *temp;

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		/* For 'this' use the type currently on the stack; otherwise the declared parameter type */
		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];

		/*
		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
		 * would be different than the MonoInst's used to represent arguments, and
		 * the ldelema implementation can't deal with that.
		 * Solution: When ldelema is used on an inline argument, create a var for
		 * it, emit ldelema on that var, and emit the saving code below in
		 * inline_method () if needed.
		 */
		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
		cfg->args [i] = temp;
		/* This uses cfg->args [i] which is set by the preceding line */
		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
		store->cil_code = sp [0]->cil_code;
6236 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6237 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6239 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debugging filter: only allow inlining of callees whose full name starts
 * with the prefix given in MONO_INLINE_CALLED_METHOD_NAME_LIMIT.
 * Returns TRUE when CALLED_METHOD passes the filter.
 */
check_inline_called_method_name_limit (MonoMethod *called_method)
	static const char *limit = NULL;

	/* Read the environment variable once and cache the result */
	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");

		if (limit_string != NULL)
			limit = limit_string;

	if (limit [0] != '\0') {
		char *called_method_name = mono_method_full_name (called_method, TRUE);

		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
		g_free (called_method_name);

		/* Prefix match: accept only methods whose full name starts with 'limit' */
		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
6269 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debugging filter: only inline when the caller's full name starts with
 * the prefix given in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * Returns TRUE when CALLER_METHOD passes the filter.
 */
check_inline_caller_method_name_limit (MonoMethod *caller_method)
	static const char *limit = NULL;

	/* Read the environment variable once and cache the result */
	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		/* Prefix match: accept only callers whose full name starts with 'limit' */
		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing DREG to the zero value appropriate for RTYPE:
 * NULL for pointers/references, 0 for integers, 0.0 for floats, and a
 * VZERO for value types.
 */
emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
	/* Static storage so the R4CONST/R8CONST instructions can point at a stable address */
	static double r8_0 = 0.0;
	static float r4_0 = 0.0;

	rtype = mini_get_underlying_type (rtype);

		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		/* With r4fp, R4 values stay in single precision */
		MONO_INST_NEW (cfg, ins, OP_R4CONST);
		ins->type = STACK_R4;
		ins->inst_p0 = (void*)&r4_0;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_INST_NEW (cfg, ins, OP_R8CONST);
		ins->type = STACK_R8;
		ins->inst_p0 = (void*)&r8_0;
		MONO_ADD_INS (cfg->cbb, ins);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
		/* Generic type parameters known to be value types are zeroed like VTs */
		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
		/* Everything else is reference-like: initialize with NULL */
		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder initializations,
 * which keep the IR valid without generating real initialization code.
 */
emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
	rtype = mini_get_underlying_type (rtype);

		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
	} else if (cfg->r4fp && t == MONO_TYPE_R4) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
		/* No dummy opcode for the remaining cases: fall back to a real init */
		emit_init_rvar (cfg, dreg, rtype);
/* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
	MonoInst *var = cfg->locals [local];
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/* Under soft-float, initialize into a fresh reg, then store it into the local */
		int reg = alloc_dreg (cfg, (MonoStackType)var->type);
		emit_init_rvar (cfg, reg, type);
		EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
		emit_init_rvar (cfg, var->dreg, type);
		emit_dummy_init_rvar (cfg, var->dreg, type);
/* Public wrapper around inline_method (); returns the inlining cost, or 0 when not inlined */
mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
	return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
 */
inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
	   guchar *ip, guint real_offset, gboolean inline_always)
	MonoInst *ins, *rvar = NULL;
	MonoMethodHeader *cheader;
	MonoBasicBlock *ebblock, *sbblock;
	MonoMethod *prev_inlined_method;
	MonoInst **prev_locals, **prev_args;
	MonoType **prev_arg_types;
	guint prev_real_offset;
	GHashTable *prev_cbb_hash;
	MonoBasicBlock **prev_cil_offset_to_bb;
	MonoBasicBlock *prev_cbb;
	const unsigned char *prev_ip;
	unsigned char *prev_cil_start;
	guint32 prev_cil_offset_to_bb_len;
	MonoMethod *prev_current_method;
	MonoGenericContext *prev_generic_context;
	gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;

	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);

	/* Optional name-based filters for debugging inlining decisions */
#if (MONO_INLINE_CALLED_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))

	/* Use the signature of the method definition, not the one at the call site */
	fsig = mono_method_signature (cmethod);

	if (cfg->verbose_level > 2)
		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

	if (!cmethod->inline_info) {
		cfg->stat_inlineable_methods++;
		cmethod->inline_info = 1;

	/* allocate local variables */
	cheader = mono_method_get_header_checked (cmethod, &error);
		if (inline_always) {
			/* Inlining is mandatory here, so surface the header error on the cfg */
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
			mono_error_move (&cfg->error, &error);
			mono_error_cleanup (&error);

	/*Must verify before creating locals as it can cause the JIT to assert.*/
	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
		mono_metadata_free_mh (cheader);

	/* allocate space to store the return value */
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);

	prev_locals = cfg->locals;
	cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
	for (i = 0; i < cheader->num_locals; ++i)
		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);

	/* allocate start and end blocks */
	/* This is needed so if the inline is aborted, we can clean up */
	NEW_BBLOCK (cfg, sbblock);
	sbblock->real_offset = real_offset;

	NEW_BBLOCK (cfg, ebblock);
	ebblock->block_num = cfg->num_bblocks++;
	ebblock->real_offset = real_offset;

	/* Save the parts of cfg that mono_method_to_ir () below will overwrite */
	prev_args = cfg->args;
	prev_arg_types = cfg->arg_types;
	prev_inlined_method = cfg->inlined_method;
	cfg->inlined_method = cmethod;
	cfg->ret_var_set = FALSE;
	cfg->inline_depth ++;
	prev_real_offset = cfg->real_offset;
	prev_cbb_hash = cfg->cbb_hash;
	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
	prev_cil_start = cfg->cil_start;
	prev_cbb = cfg->cbb;
	prev_current_method = cfg->current_method;
	prev_generic_context = cfg->generic_context;
	prev_ret_var_set = cfg->ret_var_set;
	prev_disable_inline = cfg->disable_inline;

	/* A non-static callee reached through 'callvirt' keeps virtual-call semantics */
	if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))

	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);

	ret_var_set = cfg->ret_var_set;

	/* Restore the saved cfg state */
	cfg->inlined_method = prev_inlined_method;
	cfg->real_offset = prev_real_offset;
	cfg->cbb_hash = prev_cbb_hash;
	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
	cfg->cil_start = prev_cil_start;
	cfg->locals = prev_locals;
	cfg->args = prev_args;
	cfg->arg_types = prev_arg_types;
	cfg->current_method = prev_current_method;
	cfg->generic_context = prev_generic_context;
	cfg->ret_var_set = prev_ret_var_set;
	cfg->disable_inline = prev_disable_inline;
	cfg->inline_depth --;

	/* Accept the inline when the cost is small enough, forced, or marked AggressiveInlining */
	if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
		if (cfg->verbose_level > 2)
			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));

		cfg->stat_inlined_methods++;

		/* always add some code to avoid block split failures */
		MONO_INST_NEW (cfg, ins, OP_NOP);
		MONO_ADD_INS (prev_cbb, ins);

		prev_cbb->next_bb = sbblock;
		link_bblock (cfg, prev_cbb, sbblock);

		/*
		 * Get rid of the begin and end bblocks if possible to aid local
		 */
		if (prev_cbb->out_count == 1)
			mono_merge_basic_blocks (cfg, prev_cbb, sbblock);

		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);

		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
			MonoBasicBlock *prev = ebblock->in_bb [0];

			if (prev->next_bb == ebblock) {
				mono_merge_basic_blocks (cfg, prev, ebblock);
				if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
					mono_merge_basic_blocks (cfg, prev_cbb, prev);
					cfg->cbb = prev_cbb;

				/* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */

		 * It's possible that the rvar is set in some prev bblock, but not in others.
			for (i = 0; i < ebblock->in_count; ++i) {
				bb = ebblock->in_bb [i];

				if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
					emit_init_rvar (cfg, rvar->dreg, fsig->ret);

			/*
			 * If the inlined method contains only a throw, then the ret var is not
			 * set, so set it to a dummy value.
			 */
				emit_init_rvar (cfg, rvar->dreg, fsig->ret);

			EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
		/* Inlining rejected: report and roll back the partially built IR */
		if (cfg->verbose_level > 2)
			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
		cfg->exception_type = MONO_EXCEPTION_NONE;

		/* This gets rid of the newly added bblocks */
		cfg->cbb = prev_cbb;
	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6603 * Some of these comments may well be out-of-date.
6604 * Design decisions: we do a single pass over the IL code (and we do bblock
6605 * splitting/merging in the few cases when it's required: a back jump to an IL
6606 * address that was not already seen as bblock starting point).
6607 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6608 * Complex operations are decomposed in simpler ones right away. We need to let the
6609 * arch-specific code peek and poke inside this process somehow (except when the
6610 * optimizations can take advantage of the full semantic info of coarse opcodes).
6611 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6612 * MonoInst->opcode initially is the IL opcode or some simplification of that
6613 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6614 * opcode with value bigger than OP_LAST.
6615 * At this point the IR can be handed over to an interpreter, a dumb code generator
6616 * or to the optimizing code generator that will translate it to SSA form.
6618 * Profiling directed optimizations.
6619 * We may compile by default with few or no optimizations and instrument the code
6620 * or the user may indicate what methods to optimize the most either in a config file
6621 * or through repeated runs where the compiler applies offline the optimizations to
6622 * each method and then decides if it was worth it.
/* Verification helpers used inside mono_method_to_ir (); each bails out via UNVERIFIED */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
#define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))

/* offset from br.s -> br like opcodes */
#define BIG_BRANCH_OFFSET 13
/* Return whether the IL address IP falls inside basic block BB (or has no bblock assigned) */
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];

	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create basic blocks (via GET_BBLOCK)
 * at every branch target and at the instruction following each branch.
 * Bblocks containing a CEE_THROW are marked out_of_line so they can be
 * placed out of the hot path.  *POS receives the scan position.
 */
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
	unsigned char *ip = start;
	unsigned char *target;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;

		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		opcode = &mono_opcodes [i];
		/* Advance past the operand; branch forms also register target bblocks */
		switch (opcode->argument) {
		case MonoInlineNone:
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoShortInlineR:
		case MonoShortInlineVar:
		case MonoShortInlineI:
		case MonoShortInlineBrTarget:
			/* 1-byte signed branch displacement */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineBrTarget:
			/* 4-byte signed branch displacement */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			/* The fall-through point and every switch target start a bblock */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);

			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
			g_assert_not_reached ();

		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;

			/* Find the start of the bblock containing the throw */
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];

			bblock->out_of_line = 1;
/*
 * Look up the method referenced by TOKEN, inflating it with CONTEXT when
 * needed.  For wrapper methods the lookup goes through the wrapper data
 * instead of the metadata tables.  Open constructed types are allowed.
 */
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
	if (m->wrapper_type != MONO_WRAPPER_NONE) {
		method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
			method = mono_class_inflate_generic_method_checked (method, context, error);
	method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * Like mini_get_method_allow_open (), but rejects methods on open
 * constructed types unless compiling gshared code.
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);

	if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
		mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");

	/* Without a cfg there is nowhere to record the error, so discard it */
	if (!method && !cfg)
		mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass relative to METHOD, inflating with CONTEXT
 * when given, then run mono_class_init () on the result. For wrappers the
 * class is read from the wrapper data instead of the metadata tables.
 * Resolution errors are swallowed (see FIXMEs below); presumably the caller
 * checks for a NULL/failed class — confirm against callers.
 * NOTE(review): this listing is elided; some body lines are not shown.
 */
6775 static inline MonoClass*
6776 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6781 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6782 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6784 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6785 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6788 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6789 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6792 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature relative to METHOD, inflating it
 * with CONTEXT. Wrapper methods store the signature directly in the wrapper
 * data; otherwise it is parsed from the image metadata. Returns NULL with
 * ERROR set on failure.
 * NOTE(review): this listing is elided; some body lines are not shown.
 */
6796 static inline MonoMethodSignature*
6797 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6799 MonoMethodSignature *fsig;
6802 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6803 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6805 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6806 return_val_if_nok (error, NULL);
6809 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (lazily looking it up and caching it in a function-static) the
 * managed SecurityManager.ThrowException (exception) method used by the
 * CoreCLR security checks below to raise security exceptions at runtime.
 * NOTE(review): return type line and the cache check are elided from this view.
 */
6815 throw_exception (void)
6817 static MonoMethod *method = NULL;
6820 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6821 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR into CFG that calls SecurityManager.ThrowException (EX),
 * raising the given pre-built exception object when the code runs.
 */
6828 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6830 MonoMethod *thrower = throw_exception ();
/* Pass the exception object as a pointer constant argument. */
6833 EMIT_NEW_PCONST (cfg, args [0], ex);
6834 mono_emit_method_call (cfg, thrower, args, NULL);
6838 * Return the original method if a wrapper is specified. We can only access
6839 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Map METHOD back to the user method it wraps, since custom attributes
 * (needed for CoreCLR security level checks) only exist on the original.
 * Non-wrappers are returned as-is; native-to-managed wrappers are treated
 * specially (see comment below); everything else goes through the marshal
 * wrapper reverse lookup.
 * NOTE(review): the return statements for the first two cases are elided.
 */
6842 get_original_method (MonoMethod *method)
6844 if (method->wrapper_type == MONO_WRAPPER_NONE)
6847 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6848 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6851 /* in other cases we need to find the original method */
6852 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * is not allowed to access FIELD, emit code that throws the returned
 * security exception at runtime.
 */
6856 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6858 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6859 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6861 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * is not allowed to call CALLEE, emit code that throws the returned
 * security exception at runtime.
 */
6865 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6867 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6868 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6870 emit_throw_exception (cfg, ex);
6874 * Check that the IL instructions at ip are the array initialization
6875 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the IL at IP for the RuntimeHelpers.InitializeArray ()
 * sequence emitted after a newarr (dup; ldtoken <field>; call InitializeArray)
 * and, on a match, return a pointer to the static field data backing the
 * array initializer (or, for AOT, the RVA encoded as a pointer), setting
 * *OUT_SIZE and *OUT_FIELD_TOKEN. Returns NULL (in elided lines — confirm)
 * when the pattern does not match or the element type is unsupported.
 * NOTE(review): this listing is elided; several body lines are not shown.
 */
6878 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6881 * newarr[System.Int32]
6883 * ldtoken field valuetype ...
6884 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the token table byte of the ldtoken operand (field token). */
6886 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6888 guint32 token = read32 (ip + 7);
6889 guint32 field_token = read32 (ip + 2);
6890 guint32 field_index = field_token & 0xffffff;
6892 const char *data_ptr;
6894 MonoMethod *cmethod;
6895 MonoClass *dummy_class;
6896 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6900 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6904 *out_field_token = field_token;
6906 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize the real corlib RuntimeHelpers.InitializeArray, nothing else. */
6909 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6911 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6912 case MONO_TYPE_BOOLEAN:
6916 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6917 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6918 case MONO_TYPE_CHAR:
/* Reject if the computed array data size exceeds the backing field's size. */
6935 if (size > mono_type_size (field->type, &dummy_align))
6938 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6939 if (!image_is_dynamic (method->klass->image)) {
6940 field_index = read32 (ip + 2) & 0xffffff;
6941 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6942 data_ptr = mono_image_rva_map (method->klass->image, rva);
6943 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6944 /* for aot code we do the lookup on load */
6945 if (aot && data_ptr)
6946 return (const char *)GUINT_TO_POINTER (rva);
6948 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA mapping; read the field data directly. */
6950 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at IP
 * in METHOD. Builds a human-readable message from either the header parse
 * error, an empty method body, or a disassembly of the offending instruction.
 * The header is kept alive via cfg->headers_to_free.
 */
6958 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6961 char *method_fname = mono_method_full_name (method, TRUE);
6963 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6966 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6967 mono_error_cleanup (&error);
6968 } else if (header->code_size == 0)
6969 method_code = g_strdup ("method body is empty.");
/* Otherwise disassemble the single instruction at IP for the message. */
6971 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6972 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6973 g_free (method_fname);
6974 g_free (method_code);
6975 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local variable N. When the store would be a
 * plain reg-reg move of a freshly-emitted constant, retarget the constant's
 * destination register instead of emitting a separate move.
 */
6979 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6982 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
/* Only safe when sp [0] is literally the last emitted instruction. */
6983 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6984 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6985 /* Optimize reg-reg moves away */
6987 * Can't optimize other opcodes, since sp[0] might point to
6988 * the last ins of a decomposed opcode.
6990 sp [0]->dreg = (cfg)->locals [n]->dreg;
6992 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6997 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Try to turn a "ldloca <n>; initobj <type>" pair at IP directly into a
 * local initialization, avoiding the address-taken local that ldloca would
 * create (address-taken locals inhibit many optimizations). Presumably
 * returns the IP past the consumed instructions on success and NULL on
 * failure — the return paths are elided; confirm against callers.
 */
7000 static inline unsigned char *
7001 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7011 local = read16 (ip + 2);
/* Match the two-byte-prefixed INITOBJ opcode within the same basic block. */
7015 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7016 /* From the INITOBJ case */
7017 token = read32 (ip + 2);
7018 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7019 CHECK_TYPELOAD (klass);
7020 type = mini_get_underlying_type (&klass->byval_arg);
7021 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit a virtual/interface call to CMETHOD on the receiver sp [0] for
 * llvm-only (bitcode) mode, where vtable/IMT slots hold function descriptors
 * (address + argument pairs) instead of code addresses. Dispatches between
 * four strategies: plain virtual call, simple interface call, generic-virtual/
 * variant-interface call via an extensible IMT thunk, and a fully generic
 * slow path (also used for gsharedvt).
 * NOTE(review): this listing is elided; some lines (returns, #ifdefs, braces)
 * are not shown.
 */
7029 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7031 MonoInst *icall_args [16];
7032 MonoInst *call_target, *ins, *vtable_ins;
7033 int arg_reg, this_reg, vtable_reg;
7034 gboolean is_iface = mono_class_is_interface (cmethod->klass);
7035 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7036 gboolean variant_iface = FALSE;
7039 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
7042 * In llvm-only mode, vtables contain function descriptors instead of
7043 * method addresses/trampolines.
/* Explicit null check: llvm-only code can't rely on fault-based null checks. */
7045 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots; classes use vtable indexes. */
7048 slot = mono_method_get_imt_slot (cmethod);
7050 slot = mono_method_get_vtable_index (cmethod);
7052 this_reg = sp [0]->dreg;
7054 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7055 variant_iface = TRUE;
/* Case 1: a plain (non-generic, non-interface, non-gsharedvt) virtual call. */
7057 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7059 * The simplest case, a normal virtual call.
7061 int slot_reg = alloc_preg (cfg);
7062 int addr_reg = alloc_preg (cfg);
7063 int arg_reg = alloc_preg (cfg);
7064 MonoBasicBlock *non_null_bb;
7066 vtable_reg = alloc_preg (cfg);
7067 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7068 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7070 /* Load the vtable slot, which contains a function descriptor. */
7071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7073 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: slot already initialized (non-NULL) — branch over the init icall. */
7075 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7076 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7077 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7080 // FIXME: Make the wrapper use the preserveall cconv
7081 // FIXME: Use one icall per slot for small slot numbers ?
7082 icall_args [0] = vtable_ins;
7083 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7084 /* Make the icall return the vtable slot value to save some code space */
7085 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7086 ins->dreg = slot_reg;
7087 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7090 MONO_START_BB (cfg, non_null_bb);
7091 /* Load the address + arg from the vtable slot */
7092 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
7095 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: a simple (non-variant, non-generic) interface call through an IMT slot. */
7098 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
7100 * A simple interface call
7102 * We make a call through an imt slot to obtain the function descriptor we need to call.
7103 * The imt slot contains a function descriptor for a runtime function + arg.
7105 int slot_reg = alloc_preg (cfg);
7106 int addr_reg = alloc_preg (cfg);
7107 int arg_reg = alloc_preg (cfg);
7108 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7110 vtable_reg = alloc_preg (cfg);
7111 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
7112 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7115 * The slot is already initialized when the vtable is created so there is no need
7119 /* Load the imt slot, which contains a function descriptor. */
7120 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7122 /* Load the address + arg of the imt thunk from the imt slot */
7123 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7124 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7126 * IMT thunks in llvm-only mode are C functions which take an info argument
7127 * plus the imt method and return the ftndesc to call.
7129 icall_args [0] = thunk_arg_ins;
7130 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7131 cmethod, MONO_RGCTX_INFO_METHOD);
7132 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7134 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual / variant interface / special array interface call. */
7137 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7139 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7140 * dynamically extended as more instantiations are discovered.
7141 * This handles generic virtual methods both on classes and interfaces.
7143 int slot_reg = alloc_preg (cfg);
7144 int addr_reg = alloc_preg (cfg);
7145 int arg_reg = alloc_preg (cfg);
7146 int ftndesc_reg = alloc_preg (cfg);
7147 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7148 MonoBasicBlock *slowpath_bb, *end_bb;
7150 NEW_BBLOCK (cfg, slowpath_bb);
7151 NEW_BBLOCK (cfg, end_bb);
7153 vtable_reg = alloc_preg (cfg);
7154 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface methods use negative IMT offsets, class methods positive vtable offsets. */
7156 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7158 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7160 /* Load the slot, which contains a function descriptor. */
7161 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7163 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7164 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7169 /* Same as with iface calls */
7170 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7171 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7172 icall_args [0] = thunk_arg_ins;
7173 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7174 cmethod, MONO_RGCTX_INFO_METHOD);
7175 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7176 ftndesc_ins->dreg = ftndesc_reg;
7178 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7179 * they don't know about yet. Fall back to the slowpath in that case.
7181 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7182 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7184 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve through the runtime, which also extends the IMT thunk. */
7187 MONO_START_BB (cfg, slowpath_bb);
7188 icall_args [0] = vtable_ins;
7189 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7190 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7191 cmethod, MONO_RGCTX_INFO_METHOD);
7193 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7195 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7196 ftndesc_ins->dreg = ftndesc_reg;
7197 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7200 MONO_START_BB (cfg, end_bb);
7201 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7205 * Non-optimized cases
/* Case 4: generic slow path — resolve the call target via a runtime icall. */
7207 icall_args [0] = sp [0];
7208 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7210 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7211 cmethod, MONO_RGCTX_INFO_METHOD);
/* The icall writes the extra (gsharedvt) argument through this out-parameter. */
7213 arg_reg = alloc_preg (cfg);
7214 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7215 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7217 g_assert (is_gsharedvt);
7219 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7221 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7224 * Pass the extra argument even if the callee doesn't receive it, most
7225 * calling conventions allow this.
7227 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by walking
 * the parent chain. NOTE(review): the loop construct and returns are elided
 * from this view.
 */
7231 is_exception_class (MonoClass *klass)
7234 if (klass == mono_defaults.exception_class)
7236 klass = klass->parent;
7242 * is_jit_optimizer_disabled:
7244 * Determine whether M's assembly has a DebuggableAttribute with the
7245 * IsJITOptimizerDisabled flag set.
7248 is_jit_optimizer_disabled (MonoMethod *m)
7251 MonoAssembly *ass = m->klass->image->assembly;
7252 MonoCustomAttrInfo* attrs;
7255 gboolean val = FALSE;
/* Per-assembly cache: the result is computed once and memoized on ASS. */
7258 if (ass->jit_optimizer_disabled_inited)
7259 return ass->jit_optimizer_disabled;
7261 klass = mono_class_try_get_debuggable_attribute_class ();
/* No DebuggableAttribute type available — cache FALSE and bail out. */
7265 ass->jit_optimizer_disabled = FALSE;
/* Publish the value before the inited flag so racing readers see a valid result. */
7266 mono_memory_barrier ();
7267 ass->jit_optimizer_disabled_inited = TRUE;
7271 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7272 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7274 for (i = 0; i < attrs->num_attrs; ++i) {
7275 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7277 MonoMethodSignature *sig;
7279 if (!attr->ctor || attr->ctor->klass != klass)
7281 /* Decode the attribute. See reflection.c */
7282 p = (const char*)attr->data;
/* 0x0001 is the custom attribute blob prolog mandated by ECMA-335. */
7283 g_assert (read16 (p) == 0x0001);
7286 // FIXME: Support named parameters
7287 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor overload is decoded here. */
7288 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7290 /* Two boolean arguments */
7294 mono_custom_attrs_free (attrs);
7297 ass->jit_optimizer_disabled = val;
7298 mono_memory_barrier ();
7299 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD (signature FSIG, IL opcode
 * CALL_OPCODE) can be compiled as a tail call. Starts from the architecture
 * backend's verdict, then vetoes any case where the callee could observe a
 * pointer into the caller's (about to be discarded) stack frame, plus
 * pinvokes, lmf-saving methods, most wrappers, and non-CEE_CALL opcodes.
 */
7305 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7307 gboolean supported_tail_call;
7310 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7312 for (i = 0; i < fsig->param_count; ++i) {
7313 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7314 /* These can point to the current method's stack */
7315 supported_tail_call = FALSE;
7317 if (fsig->hasthis && cmethod->klass->valuetype)
7318 /* this might point to the current method's stack */
7319 supported_tail_call = FALSE;
7320 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7321 supported_tail_call = FALSE;
7322 if (cfg->method->save_lmf)
7323 supported_tail_call = FALSE;
7324 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7325 supported_tail_call = FALSE;
7326 if (call_opcode != CEE_CALL)
7327 supported_tail_call = FALSE;
7329 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call-related failures. */
7331 if (supported_tail_call) {
7332 if (!mono_debug_count ())
7333 supported_tail_call = FALSE;
7337 return supported_tail_call;
7343 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the constructor invocation part of a NEWOBJ: tries, in order, a
 * ctor intrinsic, inlining, a gsharedvt indirect call, a generic-context
 * indirect call, and finally a plain direct call. May set up a vtable/rgctx
 * argument for generic-shared valuetype ctors. Updates *INLINE_COSTS when
 * inlining succeeds. NOTE(review): this listing is elided; some lines
 * (braces, else branches) are not shown.
 */
7346 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7347 MonoInst **sp, guint8 *ip, int *inline_costs)
7349 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Generic-shared valuetype ctors need an rgctx/vtable argument at runtime. */
7351 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7352 mono_method_is_generic_sharable (cmethod, TRUE)) {
7353 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7354 mono_class_vtable (cfg->domain, cmethod->klass);
7355 CHECK_TYPELOAD (cmethod->klass);
7357 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7358 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7361 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7362 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7364 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7366 CHECK_TYPELOAD (cmethod->klass);
7367 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7372 /* Avoid virtual calls to ctors if possible */
7373 if (mono_class_is_marshalbyref (cmethod->klass))
7374 callvirt_this_arg = sp [0];
/* 1) Try a JIT intrinsic for this ctor. */
7376 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7377 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7378 CHECK_CFG_EXCEPTION;
/* 2) Try inlining (never inline exception-class ctors). */
7379 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7380 mono_method_check_inlining (cfg, cmethod) &&
7381 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7384 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* 5 = size of the CALL instruction being replaced — presumably; confirm. */
7385 cfg->real_offset += 5;
7387 *inline_costs += costs - 5;
7389 INLINE_FAILURE ("inline failure");
7390 // FIXME-VT: Clean this up
7391 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7392 GSHAREDVT_FAILURE(*ip);
7393 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt: call through an out trampoline obtained from the rgctx. */
7395 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7398 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7400 if (cfg->llvm_only) {
7401 // FIXME: Avoid initializing vtable_arg
7402 emit_llvmonly_calli (cfg, fsig, sp, addr);
7404 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) Generic context in use: indirect call through an rgctx-fetched address. */
7406 } else if (context_used &&
7407 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7408 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7409 MonoInst *cmethod_addr;
7411 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7413 if (cfg->llvm_only) {
7414 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7415 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7416 emit_llvmonly_calli (cfg, fsig, sp, addr);
7418 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7419 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7421 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) Fallback: plain direct ctor call. */
7424 INLINE_FAILURE ("ctor call");
7425 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7426 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit the code that places VAL into the current method's return location:
 * valuetypes returned via CEE_STOBJ go through the return variable or the
 * hidden vret address; soft-float R4 returns are converted through an icall;
 * everything else goes through the architecture's setret hook.
 * NOTE(review): this listing is elided; some lines (else branches, #endif)
 * are not shown.
 */
7433 emit_setret (MonoCompile *cfg, MonoInst *val)
7435 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* Valuetype return: store into the return var or through the vret address. */
7438 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7441 if (!cfg->vret_addr) {
7442 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7444 EMIT_NEW_RETLOADA (cfg, ret_addr);
7446 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7447 ins->klass = mono_class_from_mono_type (ret_type);
7450 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value via an icall before handing it to setret. */
7451 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7452 MonoInst *iargs [1];
7456 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7457 mono_arch_emit_setret (cfg, cfg->method, conv);
7459 mono_arch_emit_setret (cfg, cfg->method, val);
7462 mono_arch_emit_setret (cfg, cfg->method, val);
7468 * mono_method_to_ir:
7470 * Translate the .net IL into linear IR.
7472 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7473 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7474 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7475 * @inline_args: if not NULL, contains the arguments to the inline call
7476 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7477 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7479 * This method is used to turn ECMA IL into Mono's internal Linear IR
7480 * representation. It is used both for entire methods, as well as
7481 * inlining existing methods. In the former case, the @start_bblock,
7482 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7483 * inline_offset is set to zero.
7485 * Returns: the inline cost, or -1 if there was an error processing this method.
7488 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7489 MonoInst *return_var, MonoInst **inline_args,
7490 guint inline_offset, gboolean is_virtual_call)
7493 MonoInst *ins, **sp, **stack_start;
7494 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7495 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7496 MonoMethod *cmethod, *method_definition;
7497 MonoInst **arg_array;
7498 MonoMethodHeader *header;
7500 guint32 token, ins_flag;
7502 MonoClass *constrained_class = NULL;
7503 unsigned char *ip, *end, *target, *err_pos;
7504 MonoMethodSignature *sig;
7505 MonoGenericContext *generic_context = NULL;
7506 MonoGenericContainer *generic_container = NULL;
7507 MonoType **param_types;
7508 int i, n, start_new_bblock, dreg;
7509 int num_calls = 0, inline_costs = 0;
7510 int breakpoint_id = 0;
7512 GSList *class_inits = NULL;
7513 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7515 gboolean init_locals, seq_points, skip_dead_blocks;
7516 gboolean sym_seq_points = FALSE;
7517 MonoDebugMethodInfo *minfo;
7518 MonoBitSet *seq_point_locs = NULL;
7519 MonoBitSet *seq_point_set_locs = NULL;
7521 cfg->disable_inline = is_jit_optimizer_disabled (method);
7523 /* serialization and xdomain stuff may need access to private fields and methods */
7524 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7525 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7526 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7527 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7528 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7529 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7531 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7532 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7533 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7534 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7535 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7537 image = method->klass->image;
7538 header = mono_method_get_header_checked (method, &cfg->error);
7540 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7541 goto exception_exit;
7543 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7546 generic_container = mono_method_get_generic_container (method);
7547 sig = mono_method_signature (method);
7548 num_args = sig->hasthis + sig->param_count;
7549 ip = (unsigned char*)header->code;
7550 cfg->cil_start = ip;
7551 end = ip + header->code_size;
7552 cfg->stat_cil_code_size += header->code_size;
7554 seq_points = cfg->gen_seq_points && cfg->method == method;
7556 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7557 /* We could hit a seq point before attaching to the JIT (#8338) */
7561 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7562 minfo = mono_debug_lookup_method (method);
7564 MonoSymSeqPoint *sps;
7565 int i, n_il_offsets;
7567 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7568 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7569 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7570 sym_seq_points = TRUE;
7571 for (i = 0; i < n_il_offsets; ++i) {
7572 if (sps [i].il_offset < header->code_size)
7573 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7577 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7579 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7581 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7582 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7584 mono_debug_free_method_async_debug_info (asyncMethod);
7586 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7587 /* Methods without line number info like auto-generated property accessors */
7588 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7589 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7590 sym_seq_points = TRUE;
7595 * Methods without init_locals set could cause asserts in various passes
7596 * (#497220). To work around this, we emit dummy initialization opcodes
7597 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7598 * on some platforms.
7600 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7601 init_locals = header->init_locals;
7605 method_definition = method;
7606 while (method_definition->is_inflated) {
7607 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7608 method_definition = imethod->declaring;
7611 /* SkipVerification is not allowed if core-clr is enabled */
7612 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7614 dont_verify_stloc = TRUE;
7617 if (sig->is_inflated)
7618 generic_context = mono_method_get_context (method);
7619 else if (generic_container)
7620 generic_context = &generic_container->context;
7621 cfg->generic_context = generic_context;
7624 g_assert (!sig->has_type_parameters);
7626 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7627 g_assert (method->is_inflated);
7628 g_assert (mono_method_get_context (method)->method_inst);
7630 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7631 g_assert (sig->generic_param_count);
7633 if (cfg->method == method) {
7634 cfg->real_offset = 0;
7636 cfg->real_offset = inline_offset;
7639 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7640 cfg->cil_offset_to_bb_len = header->code_size;
7642 cfg->current_method = method;
7644 if (cfg->verbose_level > 2)
7645 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7647 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7649 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7650 for (n = 0; n < sig->param_count; ++n)
7651 param_types [n + sig->hasthis] = sig->params [n];
7652 cfg->arg_types = param_types;
7654 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7655 if (cfg->method == method) {
7657 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7658 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7661 NEW_BBLOCK (cfg, start_bblock);
7662 cfg->bb_entry = start_bblock;
7663 start_bblock->cil_code = NULL;
7664 start_bblock->cil_length = 0;
7667 NEW_BBLOCK (cfg, end_bblock);
7668 cfg->bb_exit = end_bblock;
7669 end_bblock->cil_code = NULL;
7670 end_bblock->cil_length = 0;
7671 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7672 g_assert (cfg->num_bblocks == 2);
7674 arg_array = cfg->args;
7676 if (header->num_clauses) {
7677 cfg->spvars = g_hash_table_new (NULL, NULL);
7678 cfg->exvars = g_hash_table_new (NULL, NULL);
7680 /* handle exception clauses */
7681 for (i = 0; i < header->num_clauses; ++i) {
7682 MonoBasicBlock *try_bb;
7683 MonoExceptionClause *clause = &header->clauses [i];
7684 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7686 try_bb->real_offset = clause->try_offset;
7687 try_bb->try_start = TRUE;
7688 try_bb->region = ((i + 1) << 8) | clause->flags;
7689 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7690 tblock->real_offset = clause->handler_offset;
7691 tblock->flags |= BB_EXCEPTION_HANDLER;
7694 * Linking the try block with the EH block hinders inlining as we won't be able to
7695 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7697 if (COMPILE_LLVM (cfg))
7698 link_bblock (cfg, try_bb, tblock);
7700 if (*(ip + clause->handler_offset) == CEE_POP)
7701 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7703 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7704 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7705 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7706 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7707 MONO_ADD_INS (tblock, ins);
7709 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7710 /* finally clauses already have a seq point */
7711 /* seq points for filter clauses are emitted below */
7712 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7713 MONO_ADD_INS (tblock, ins);
7716 /* todo: is a fault block unsafe to optimize? */
7717 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7718 tblock->flags |= BB_EXCEPTION_UNSAFE;
7721 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7723 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7725 /* catch and filter blocks get the exception object on the stack */
7726 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7727 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7729 /* mostly like handle_stack_args (), but just sets the input args */
7730 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7731 tblock->in_scount = 1;
7732 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7733 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7737 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7738 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7739 if (!cfg->compile_llvm) {
7740 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7741 ins->dreg = tblock->in_stack [0]->dreg;
7742 MONO_ADD_INS (tblock, ins);
7745 MonoInst *dummy_use;
7748 * Add a dummy use for the exvar so its liveness info will be
7751 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7754 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7755 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7756 MONO_ADD_INS (tblock, ins);
7759 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7760 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7761 tblock->flags |= BB_EXCEPTION_HANDLER;
7762 tblock->real_offset = clause->data.filter_offset;
7763 tblock->in_scount = 1;
7764 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7765 /* The filter block shares the exvar with the handler block */
7766 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7767 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7768 MONO_ADD_INS (tblock, ins);
7772 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7773 clause->data.catch_class &&
7775 mono_class_check_context_used (clause->data.catch_class)) {
7777 * In shared generic code with catch
7778 * clauses containing type variables
7779 * the exception handling code has to
7780 * be able to get to the rgctx.
7781 * Therefore we have to make sure that
7782 * the vtable/mrgctx argument (for
7783 * static or generic methods) or the
7784 * "this" argument (for non-static
7785 * methods) are live.
7787 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7788 mini_method_get_context (method)->method_inst ||
7789 method->klass->valuetype) {
7790 mono_get_vtable_var (cfg);
7792 MonoInst *dummy_use;
7794 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7799 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7800 cfg->cbb = start_bblock;
7801 cfg->args = arg_array;
7802 mono_save_args (cfg, sig, inline_args);
7805 /* FIRST CODE BLOCK */
7806 NEW_BBLOCK (cfg, tblock);
7807 tblock->cil_code = ip;
7811 ADD_BBLOCK (cfg, tblock);
7813 if (cfg->method == method) {
7814 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7815 if (breakpoint_id) {
7816 MONO_INST_NEW (cfg, ins, OP_BREAK);
7817 MONO_ADD_INS (cfg->cbb, ins);
7821 /* we use a separate basic block for the initialization code */
7822 NEW_BBLOCK (cfg, init_localsbb);
7823 if (cfg->method == method)
7824 cfg->bb_init = init_localsbb;
7825 init_localsbb->real_offset = cfg->real_offset;
7826 start_bblock->next_bb = init_localsbb;
7827 init_localsbb->next_bb = cfg->cbb;
7828 link_bblock (cfg, start_bblock, init_localsbb);
7829 link_bblock (cfg, init_localsbb, cfg->cbb);
7831 cfg->cbb = init_localsbb;
7833 if (cfg->gsharedvt && cfg->method == method) {
7834 MonoGSharedVtMethodInfo *info;
7835 MonoInst *var, *locals_var;
7838 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7839 info->method = cfg->method;
7840 info->count_entries = 16;
7841 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7842 cfg->gsharedvt_info = info;
7844 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7845 /* prevent it from being register allocated */
7846 //var->flags |= MONO_INST_VOLATILE;
7847 cfg->gsharedvt_info_var = var;
7849 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7850 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7852 /* Allocate locals */
7853 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7854 /* prevent it from being register allocated */
7855 //locals_var->flags |= MONO_INST_VOLATILE;
7856 cfg->gsharedvt_locals_var = locals_var;
7858 dreg = alloc_ireg (cfg);
7859 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7861 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7862 ins->dreg = locals_var->dreg;
7864 MONO_ADD_INS (cfg->cbb, ins);
7865 cfg->gsharedvt_locals_var_ins = ins;
7867 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7870 ins->flags |= MONO_INST_INIT;
7874 if (mono_security_core_clr_enabled ()) {
7875 /* check if this is native code, e.g. an icall or a p/invoke */
7876 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7877 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7879 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7880 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7882 /* if this is a native call then it can only be JITted from platform code */
7883 if ((icall || pinvk) && method->klass && method->klass->image) {
7884 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7885 MonoException *ex = icall ? mono_get_exception_security () :
7886 mono_get_exception_method_access ();
7887 emit_throw_exception (cfg, ex);
7894 CHECK_CFG_EXCEPTION;
7896 if (header->code_size == 0)
7899 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7904 if (cfg->method == method)
7905 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7907 for (n = 0; n < header->num_locals; ++n) {
7908 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7913 /* We force the vtable variable here for all shared methods
7914 for the possibility that they might show up in a stack
7915 trace where their exact instantiation is needed. */
7916 if (cfg->gshared && method == cfg->method) {
7917 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7918 mini_method_get_context (method)->method_inst ||
7919 method->klass->valuetype) {
7920 mono_get_vtable_var (cfg);
7922 /* FIXME: Is there a better way to do this?
7923 We need the variable live for the duration
7924 of the whole method. */
7925 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7929 /* add a check for this != NULL to inlined methods */
7930 if (is_virtual_call) {
7933 NEW_ARGLOAD (cfg, arg_ins, 0);
7934 MONO_ADD_INS (cfg->cbb, arg_ins);
7935 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7938 skip_dead_blocks = !dont_verify;
7939 if (skip_dead_blocks) {
7940 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7945 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7946 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7949 start_new_bblock = 0;
7951 if (cfg->method == method)
7952 cfg->real_offset = ip - header->code;
7954 cfg->real_offset = inline_offset;
7959 if (start_new_bblock) {
7960 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7961 if (start_new_bblock == 2) {
7962 g_assert (ip == tblock->cil_code);
7964 GET_BBLOCK (cfg, tblock, ip);
7966 cfg->cbb->next_bb = tblock;
7968 start_new_bblock = 0;
7969 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7970 if (cfg->verbose_level > 3)
7971 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7972 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7976 g_slist_free (class_inits);
7979 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7980 link_bblock (cfg, cfg->cbb, tblock);
7981 if (sp != stack_start) {
7982 handle_stack_args (cfg, stack_start, sp - stack_start);
7984 CHECK_UNVERIFIABLE (cfg);
7986 cfg->cbb->next_bb = tblock;
7988 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7989 if (cfg->verbose_level > 3)
7990 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7991 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7994 g_slist_free (class_inits);
7999 if (skip_dead_blocks) {
8000 int ip_offset = ip - header->code;
8002 if (ip_offset == bb->end)
8006 int op_size = mono_opcode_size (ip, end);
8007 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8009 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8011 if (ip_offset + op_size == bb->end) {
8012 MONO_INST_NEW (cfg, ins, OP_NOP);
8013 MONO_ADD_INS (cfg->cbb, ins);
8014 start_new_bblock = 1;
8022 * Sequence points are points where the debugger can place a breakpoint.
8023 * Currently, we generate these automatically at points where the IL
8026 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8028 * Make methods interruptable at the beginning, and at the targets of
8029 * backward branches.
8030 * Also, do this at the start of every bblock in methods with clauses too,
8031 * to be able to handle instructions with imprecise control flow like
8033 * Backward branches are handled at the end of method-to-ir ().
8035 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8036 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8038 /* Avoid sequence points on empty IL like .volatile */
8039 // FIXME: Enable this
8040 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8041 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8042 if ((sp != stack_start) && !sym_seq_point)
8043 ins->flags |= MONO_INST_NONEMPTY_STACK;
8044 MONO_ADD_INS (cfg->cbb, ins);
8047 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8050 cfg->cbb->real_offset = cfg->real_offset;
8052 if ((cfg->method == method) && cfg->coverage_info) {
8053 guint32 cil_offset = ip - header->code;
8054 cfg->coverage_info->data [cil_offset].cil_code = ip;
8056 /* TODO: Use an increment here */
8057 #if defined(TARGET_X86)
8058 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8059 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8061 MONO_ADD_INS (cfg->cbb, ins);
8063 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8064 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8068 if (cfg->verbose_level > 3)
8069 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8073 if (seq_points && !sym_seq_points && sp != stack_start) {
8075 * The C# compiler uses these nops to notify the JIT that it should
8076 * insert seq points.
8078 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8079 MONO_ADD_INS (cfg->cbb, ins);
8081 if (cfg->keep_cil_nops)
8082 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8084 MONO_INST_NEW (cfg, ins, OP_NOP);
8086 MONO_ADD_INS (cfg->cbb, ins);
8089 if (should_insert_brekpoint (cfg->method)) {
8090 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8092 MONO_INST_NEW (cfg, ins, OP_NOP);
8095 MONO_ADD_INS (cfg->cbb, ins);
8101 CHECK_STACK_OVF (1);
8102 n = (*ip)-CEE_LDARG_0;
8104 EMIT_NEW_ARGLOAD (cfg, ins, n);
8112 CHECK_STACK_OVF (1);
8113 n = (*ip)-CEE_LDLOC_0;
8115 EMIT_NEW_LOCLOAD (cfg, ins, n);
8124 n = (*ip)-CEE_STLOC_0;
8127 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8129 emit_stloc_ir (cfg, sp, header, n);
8136 CHECK_STACK_OVF (1);
8139 EMIT_NEW_ARGLOAD (cfg, ins, n);
8145 CHECK_STACK_OVF (1);
8148 NEW_ARGLOADA (cfg, ins, n);
8149 MONO_ADD_INS (cfg->cbb, ins);
8159 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8161 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8166 CHECK_STACK_OVF (1);
8169 EMIT_NEW_LOCLOAD (cfg, ins, n);
8173 case CEE_LDLOCA_S: {
8174 unsigned char *tmp_ip;
8176 CHECK_STACK_OVF (1);
8177 CHECK_LOCAL (ip [1]);
8179 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8185 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8194 CHECK_LOCAL (ip [1]);
8195 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8197 emit_stloc_ir (cfg, sp, header, ip [1]);
8202 CHECK_STACK_OVF (1);
8203 EMIT_NEW_PCONST (cfg, ins, NULL);
8204 ins->type = STACK_OBJ;
8209 CHECK_STACK_OVF (1);
8210 EMIT_NEW_ICONST (cfg, ins, -1);
8223 CHECK_STACK_OVF (1);
8224 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8230 CHECK_STACK_OVF (1);
8232 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8238 CHECK_STACK_OVF (1);
8239 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8245 CHECK_STACK_OVF (1);
8246 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8247 ins->type = STACK_I8;
8248 ins->dreg = alloc_dreg (cfg, STACK_I8);
8250 ins->inst_l = (gint64)read64 (ip);
8251 MONO_ADD_INS (cfg->cbb, ins);
8257 gboolean use_aotconst = FALSE;
8259 #ifdef TARGET_POWERPC
8260 /* FIXME: Clean this up */
8261 if (cfg->compile_aot)
8262 use_aotconst = TRUE;
8265 /* FIXME: we should really allocate this only late in the compilation process */
8266 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8268 CHECK_STACK_OVF (1);
8274 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8276 dreg = alloc_freg (cfg);
8277 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8278 ins->type = cfg->r4_stack_type;
8280 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8281 ins->type = cfg->r4_stack_type;
8282 ins->dreg = alloc_dreg (cfg, STACK_R8);
8284 MONO_ADD_INS (cfg->cbb, ins);
8294 gboolean use_aotconst = FALSE;
8296 #ifdef TARGET_POWERPC
8297 /* FIXME: Clean this up */
8298 if (cfg->compile_aot)
8299 use_aotconst = TRUE;
8302 /* FIXME: we should really allocate this only late in the compilation process */
8303 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8305 CHECK_STACK_OVF (1);
8311 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8313 dreg = alloc_freg (cfg);
8314 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8315 ins->type = STACK_R8;
8317 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8318 ins->type = STACK_R8;
8319 ins->dreg = alloc_dreg (cfg, STACK_R8);
8321 MONO_ADD_INS (cfg->cbb, ins);
8330 MonoInst *temp, *store;
8332 CHECK_STACK_OVF (1);
8336 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8337 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8339 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8342 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8355 if (sp [0]->type == STACK_R8)
8356 /* we need to pop the value from the x86 FP stack */
8357 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8362 MonoMethodSignature *fsig;
8365 INLINE_FAILURE ("jmp");
8366 GSHAREDVT_FAILURE (*ip);
8369 if (stack_start != sp)
8371 token = read32 (ip + 1);
8372 /* FIXME: check the signature matches */
8373 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8376 if (cfg->gshared && mono_method_check_context_used (cmethod))
8377 GENERIC_SHARING_FAILURE (CEE_JMP);
8379 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8381 fsig = mono_method_signature (cmethod);
8382 n = fsig->param_count + fsig->hasthis;
8383 if (cfg->llvm_only) {
8386 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8387 for (i = 0; i < n; ++i)
8388 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8389 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8391 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8392 * have to emit a normal return since llvm expects it.
8395 emit_setret (cfg, ins);
8396 MONO_INST_NEW (cfg, ins, OP_BR);
8397 ins->inst_target_bb = end_bblock;
8398 MONO_ADD_INS (cfg->cbb, ins);
8399 link_bblock (cfg, cfg->cbb, end_bblock);
8402 } else if (cfg->backend->have_op_tail_call) {
8403 /* Handle tail calls similarly to calls */
8406 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8407 call->method = cmethod;
8408 call->tail_call = TRUE;
8409 call->signature = mono_method_signature (cmethod);
8410 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8411 call->inst.inst_p0 = cmethod;
8412 for (i = 0; i < n; ++i)
8413 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8415 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8416 call->vret_var = cfg->vret_addr;
8418 mono_arch_emit_call (cfg, call);
8419 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8420 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8422 for (i = 0; i < num_args; ++i)
8423 /* Prevent arguments from being optimized away */
8424 arg_array [i]->flags |= MONO_INST_VOLATILE;
8426 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8427 ins = (MonoInst*)call;
8428 ins->inst_p0 = cmethod;
8429 MONO_ADD_INS (cfg->cbb, ins);
8433 start_new_bblock = 1;
8438 MonoMethodSignature *fsig;
8441 token = read32 (ip + 1);
8445 //GSHAREDVT_FAILURE (*ip);
8450 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8453 if (method->dynamic && fsig->pinvoke) {
8457 * This is a call through a function pointer using a pinvoke
8458 * signature. Have to create a wrapper and call that instead.
8459 * FIXME: This is very slow, need to create a wrapper at JIT time
8460 * instead based on the signature.
8462 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8463 EMIT_NEW_PCONST (cfg, args [1], fsig);
8465 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8468 n = fsig->param_count + fsig->hasthis;
8472 //g_assert (!virtual_ || fsig->hasthis);
8476 inline_costs += 10 * num_calls++;
8479 * Making generic calls out of gsharedvt methods.
8480 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8481 * patching gshared method addresses into a gsharedvt method.
8483 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8485 * We pass the address to the gsharedvt trampoline in the rgctx reg
8487 MonoInst *callee = addr;
8489 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8491 GSHAREDVT_FAILURE (*ip);
8495 GSHAREDVT_FAILURE (*ip);
8497 addr = emit_get_rgctx_sig (cfg, context_used,
8498 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8499 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8503 /* Prevent inlining of methods with indirect calls */
8504 INLINE_FAILURE ("indirect call");
8506 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8507 MonoJumpInfoType info_type;
8511 * Instead of emitting an indirect call, emit a direct call
8512 * with the contents of the aotconst as the patch info.
8514 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8515 info_type = (MonoJumpInfoType)addr->inst_c1;
8516 info_data = addr->inst_p0;
8518 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8519 info_data = addr->inst_right->inst_left;
8522 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8523 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8526 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8527 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8532 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8536 /* End of call, INS should contain the result of the call, if any */
8538 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8540 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8543 CHECK_CFG_EXCEPTION;
8547 constrained_class = NULL;
8551 case CEE_CALLVIRT: {
8552 MonoInst *addr = NULL;
8553 MonoMethodSignature *fsig = NULL;
8555 int virtual_ = *ip == CEE_CALLVIRT;
8556 gboolean pass_imt_from_rgctx = FALSE;
8557 MonoInst *imt_arg = NULL;
8558 MonoInst *keep_this_alive = NULL;
8559 gboolean pass_vtable = FALSE;
8560 gboolean pass_mrgctx = FALSE;
8561 MonoInst *vtable_arg = NULL;
8562 gboolean check_this = FALSE;
8563 gboolean supported_tail_call = FALSE;
8564 gboolean tail_call = FALSE;
8565 gboolean need_seq_point = FALSE;
8566 guint32 call_opcode = *ip;
8567 gboolean emit_widen = TRUE;
8568 gboolean push_res = TRUE;
8569 gboolean skip_ret = FALSE;
8570 gboolean delegate_invoke = FALSE;
8571 gboolean direct_icall = FALSE;
8572 gboolean constrained_partial_call = FALSE;
8573 MonoMethod *cil_method;
8576 token = read32 (ip + 1);
8580 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8583 cil_method = cmethod;
8585 if (constrained_class) {
8586 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8587 if (!mini_is_gsharedvt_klass (constrained_class)) {
8588 g_assert (!cmethod->klass->valuetype);
8589 if (!mini_type_is_reference (&constrained_class->byval_arg))
8590 constrained_partial_call = TRUE;
8594 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8595 if (cfg->verbose_level > 2)
8596 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8597 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8598 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8600 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8604 if (cfg->verbose_level > 2)
8605 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8607 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8609 * This is needed since get_method_constrained can't find
8610 * the method in klass representing a type var.
8611 * The type var is guaranteed to be a reference type in this
8614 if (!mini_is_gsharedvt_klass (constrained_class))
8615 g_assert (!cmethod->klass->valuetype);
8617 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8622 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8623 /* Use the corresponding method from the base type to avoid boxing */
8624 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8625 g_assert (base_type);
8626 constrained_class = mono_class_from_mono_type (base_type);
8627 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8632 if (!dont_verify && !cfg->skip_visibility) {
8633 MonoMethod *target_method = cil_method;
8634 if (method->is_inflated) {
8635 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8638 if (!mono_method_can_access_method (method_definition, target_method) &&
8639 !mono_method_can_access_method (method, cil_method))
8640 emit_method_access_failure (cfg, method, cil_method);
8643 if (mono_security_core_clr_enabled ())
8644 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8646 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8647 /* MS.NET seems to silently convert this to a callvirt */
8652 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8653 * converts to a callvirt.
8655 * tests/bug-515884.il is an example of this behavior
8657 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8658 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8659 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8663 if (!cmethod->klass->inited)
8664 if (!mono_class_init (cmethod->klass))
8665 TYPE_LOAD_ERROR (cmethod->klass);
8667 fsig = mono_method_signature (cmethod);
8670 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8671 mini_class_is_system_array (cmethod->klass)) {
8672 array_rank = cmethod->klass->rank;
8673 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8674 direct_icall = TRUE;
8675 } else if (fsig->pinvoke) {
8676 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8677 fsig = mono_method_signature (wrapper);
8678 } else if (constrained_class) {
8680 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8684 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8685 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8687 /* See code below */
8688 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8689 MonoBasicBlock *tbb;
8691 GET_BBLOCK (cfg, tbb, ip + 5);
8692 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8694 * We want to extend the try block to cover the call, but we can't do it if the
8695 * call is made directly since its followed by an exception check.
8697 direct_icall = FALSE;
8701 mono_save_token_info (cfg, image, token, cil_method);
8703 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8704 need_seq_point = TRUE;
8706 /* Don't support calls made using type arguments for now */
8708 if (cfg->gsharedvt) {
8709 if (mini_is_gsharedvt_signature (fsig))
8710 GSHAREDVT_FAILURE (*ip);
8714 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8715 g_assert_not_reached ();
8717 n = fsig->param_count + fsig->hasthis;
8719 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8723 g_assert (!mono_method_check_context_used (cmethod));
8727 //g_assert (!virtual_ || fsig->hasthis);
8732 * We have the `constrained.' prefix opcode.
8734 if (constrained_class) {
8735 if (mini_is_gsharedvt_klass (constrained_class)) {
8736 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8737 /* The 'Own method' case below */
8738 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8739 /* 'The type parameter is instantiated as a reference type' case below. */
8741 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8742 CHECK_CFG_EXCEPTION;
8748 if (constrained_partial_call) {
8749 gboolean need_box = TRUE;
8752 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8753 * called method is not known at compile time either. The called method could end up being
8754 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8755 * to box the receiver.
8756 * A simple solution would be to box always and make a normal virtual call, but that would
8757 * be bad performance wise.
8759 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8761 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8766 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8767 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8768 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8769 ins->klass = constrained_class;
8770 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8771 CHECK_CFG_EXCEPTION;
8772 } else if (need_box) {
8774 MonoBasicBlock *is_ref_bb, *end_bb;
8775 MonoInst *nonbox_call;
8778 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8780 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8781 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8783 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8785 NEW_BBLOCK (cfg, is_ref_bb);
8786 NEW_BBLOCK (cfg, end_bb);
8788 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8789 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8790 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8793 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8795 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8798 MONO_START_BB (cfg, is_ref_bb);
8799 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8800 ins->klass = constrained_class;
8801 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8802 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8804 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8806 MONO_START_BB (cfg, end_bb);
8809 nonbox_call->dreg = ins->dreg;
8812 g_assert (mono_class_is_interface (cmethod->klass));
8813 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8814 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8817 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8819 * The type parameter is instantiated as a valuetype,
8820 * but that type doesn't override the method we're
8821 * calling, so we need to box `this'.
8823 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8824 ins->klass = constrained_class;
8825 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8826 CHECK_CFG_EXCEPTION;
8827 } else if (!constrained_class->valuetype) {
8828 int dreg = alloc_ireg_ref (cfg);
8831 * The type parameter is instantiated as a reference
8832 * type. We have a managed pointer on the stack, so
8833 * we need to dereference it here.
8835 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8836 ins->type = STACK_OBJ;
8839 if (cmethod->klass->valuetype) {
8842 /* Interface method */
8845 mono_class_setup_vtable (constrained_class);
8846 CHECK_TYPELOAD (constrained_class);
8847 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8849 TYPE_LOAD_ERROR (constrained_class);
8850 slot = mono_method_get_vtable_slot (cmethod);
8852 TYPE_LOAD_ERROR (cmethod->klass);
8853 cmethod = constrained_class->vtable [ioffset + slot];
8855 if (cmethod->klass == mono_defaults.enum_class) {
8856 /* Enum implements some interfaces, so treat this as the first case */
8857 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8858 ins->klass = constrained_class;
8859 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8860 CHECK_CFG_EXCEPTION;
8865 constrained_class = NULL;
8868 if (check_call_signature (cfg, fsig, sp))
8871 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8872 delegate_invoke = TRUE;
8874 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8875 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8876 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8884 * If the callee is a shared method, then its static cctor
8885 * might not get called after the call was patched.
8887 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8888 emit_class_init (cfg, cmethod->klass);
8889 CHECK_TYPELOAD (cmethod->klass);
8892 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8895 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8897 context_used = mini_method_check_context_used (cfg, cmethod);
8899 if (context_used && mono_class_is_interface (cmethod->klass)) {
8900 /* Generic method interface
8901 calls are resolved via a
8902 helper function and don't
8904 if (!cmethod_context || !cmethod_context->method_inst)
8905 pass_imt_from_rgctx = TRUE;
8909 * If a shared method calls another
8910 * shared method then the caller must
8911 * have a generic sharing context
8912 * because the magic trampoline
8913 * requires it. FIXME: We shouldn't
8914 * have to force the vtable/mrgctx
8915 * variable here. Instead there
8916 * should be a flag in the cfg to
8917 * request a generic sharing context.
8920 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8921 mono_get_vtable_var (cfg);
8926 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8928 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8930 CHECK_TYPELOAD (cmethod->klass);
8931 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8936 g_assert (!vtable_arg);
8938 if (!cfg->compile_aot) {
8940 * emit_get_rgctx_method () calls mono_class_vtable () so check
8941 * for type load errors before.
8943 mono_class_setup_vtable (cmethod->klass);
8944 CHECK_TYPELOAD (cmethod->klass);
8947 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8949 /* !marshalbyref is needed to properly handle generic methods + remoting */
8950 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8951 MONO_METHOD_IS_FINAL (cmethod)) &&
8952 !mono_class_is_marshalbyref (cmethod->klass)) {
8959 if (pass_imt_from_rgctx) {
8960 g_assert (!pass_vtable);
8962 imt_arg = emit_get_rgctx_method (cfg, context_used,
8963 cmethod, MONO_RGCTX_INFO_METHOD);
8967 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8969 /* Calling virtual generic methods */
8970 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8971 !(MONO_METHOD_IS_FINAL (cmethod) &&
8972 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8973 fsig->generic_param_count &&
8974 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8976 MonoInst *this_temp, *this_arg_temp, *store;
8977 MonoInst *iargs [4];
8979 g_assert (fsig->is_inflated);
8981 /* Prevent inlining of methods that contain indirect calls */
8982 INLINE_FAILURE ("virtual generic call");
8984 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8985 GSHAREDVT_FAILURE (*ip);
8987 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8988 g_assert (!imt_arg);
8990 g_assert (cmethod->is_inflated);
8991 imt_arg = emit_get_rgctx_method (cfg, context_used,
8992 cmethod, MONO_RGCTX_INFO_METHOD);
8993 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8995 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8996 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8997 MONO_ADD_INS (cfg->cbb, store);
8999 /* FIXME: This should be a managed pointer */
9000 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9002 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9003 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9004 cmethod, MONO_RGCTX_INFO_METHOD);
9005 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9006 addr = mono_emit_jit_icall (cfg,
9007 mono_helper_compile_generic_method, iargs);
9009 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9011 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9018 * Implement a workaround for the inherent races involved in locking:
9024 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9025 * try block, the Exit () won't be executed, see:
9026 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9027 * To work around this, we extend such try blocks to include the last x bytes
9028 * of the Monitor.Enter () call.
9030 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9031 MonoBasicBlock *tbb;
9033 GET_BBLOCK (cfg, tbb, ip + 5);
9035 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9036 * from Monitor.Enter like ArgumentNullException.
9038 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9039 /* Mark this bblock as needing to be extended */
9040 tbb->extend_try_block = TRUE;
9044 /* Conversion to a JIT intrinsic */
9045 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9046 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9047 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9055 if ((cfg->opt & MONO_OPT_INLINE) &&
9056 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9057 mono_method_check_inlining (cfg, cmethod)) {
9059 gboolean always = FALSE;
9061 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9062 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9063 /* Prevent inlining of methods that call wrappers */
9064 INLINE_FAILURE ("wrapper call");
9065 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9069 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9071 cfg->real_offset += 5;
9073 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9074 /* *sp is already set by inline_method */
9079 inline_costs += costs;
9085 /* Tail recursion elimination */
9086 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9087 gboolean has_vtargs = FALSE;
9090 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9091 INLINE_FAILURE ("tail call");
9093 /* keep it simple */
9094 for (i = fsig->param_count - 1; i >= 0; i--) {
9095 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9100 if (need_seq_point) {
9101 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9102 need_seq_point = FALSE;
9104 for (i = 0; i < n; ++i)
9105 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9106 MONO_INST_NEW (cfg, ins, OP_BR);
9107 MONO_ADD_INS (cfg->cbb, ins);
9108 tblock = start_bblock->out_bb [0];
9109 link_bblock (cfg, cfg->cbb, tblock);
9110 ins->inst_target_bb = tblock;
9111 start_new_bblock = 1;
9113 /* skip the CEE_RET, too */
9114 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9121 inline_costs += 10 * num_calls++;
9124 * Synchronized wrappers.
9125 * Its hard to determine where to replace a method with its synchronized
9126 * wrapper without causing an infinite recursion. The current solution is
9127 * to add the synchronized wrapper in the trampolines, and to
9128 * change the called method to a dummy wrapper, and resolve that wrapper
9129 * to the real method in mono_jit_compile_method ().
9131 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9132 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9133 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9134 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9138 * Making generic calls out of gsharedvt methods.
9139 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9140 * patching gshared method addresses into a gsharedvt method.
9142 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9143 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9144 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9145 MonoRgctxInfoType info_type;
9148 //if (mono_class_is_interface (cmethod->klass))
9149 //GSHAREDVT_FAILURE (*ip);
9150 // disable for possible remoting calls
9151 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9152 GSHAREDVT_FAILURE (*ip);
9153 if (fsig->generic_param_count) {
9154 /* virtual generic call */
9155 g_assert (!imt_arg);
9156 /* Same as the virtual generic case above */
9157 imt_arg = emit_get_rgctx_method (cfg, context_used,
9158 cmethod, MONO_RGCTX_INFO_METHOD);
9159 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9161 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9162 /* This can happen when we call a fully instantiated iface method */
9163 imt_arg = emit_get_rgctx_method (cfg, context_used,
9164 cmethod, MONO_RGCTX_INFO_METHOD);
9169 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9170 keep_this_alive = sp [0];
9172 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9173 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9175 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9176 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9178 if (cfg->llvm_only) {
9179 // FIXME: Avoid initializing vtable_arg
9180 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9182 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9187 /* Generic sharing */
9190 * Use this if the callee is gsharedvt sharable too, since
9191 * at runtime we might find an instantiation so the call cannot
9192 * be patched (the 'no_patch' code path in mini-trampolines.c).
9194 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9195 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9196 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9197 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9198 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9199 INLINE_FAILURE ("gshared");
9201 g_assert (cfg->gshared && cmethod);
9205 * We are compiling a call to a
9206 * generic method from shared code,
9207 * which means that we have to look up
9208 * the method in the rgctx and do an
9212 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9214 if (cfg->llvm_only) {
9215 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9216 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9218 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9219 // FIXME: Avoid initializing imt_arg/vtable_arg
9220 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9222 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9223 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9228 /* Direct calls to icalls */
9230 MonoMethod *wrapper;
9233 /* Inline the wrapper */
9234 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9236 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9237 g_assert (costs > 0);
9238 cfg->real_offset += 5;
9240 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9241 /* *sp is already set by inline_method */
9246 inline_costs += costs;
9255 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9256 MonoInst *val = sp [fsig->param_count];
9258 if (val->type == STACK_OBJ) {
9259 MonoInst *iargs [2];
9264 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9267 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9268 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9269 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9270 emit_write_barrier (cfg, addr, val);
9271 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9272 GSHAREDVT_FAILURE (*ip);
9273 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9274 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9276 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9277 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9278 if (!cmethod->klass->element_class->valuetype && !readonly)
9279 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9280 CHECK_TYPELOAD (cmethod->klass);
9283 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9286 g_assert_not_reached ();
9293 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9297 /* Tail prefix / tail call optimization */
9299 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9300 /* FIXME: runtime generic context pointer for jumps? */
9301 /* FIXME: handle this for generic sharing eventually */
9302 if ((ins_flag & MONO_INST_TAILCALL) &&
9303 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9304 supported_tail_call = TRUE;
9306 if (supported_tail_call) {
9309 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9310 INLINE_FAILURE ("tail call");
9312 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9314 if (cfg->backend->have_op_tail_call) {
9315 /* Handle tail calls similarly to normal calls */
9318 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9320 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9321 call->tail_call = TRUE;
9322 call->method = cmethod;
9323 call->signature = mono_method_signature (cmethod);
9326 * We implement tail calls by storing the actual arguments into the
9327 * argument variables, then emitting a CEE_JMP.
9329 for (i = 0; i < n; ++i) {
9330 /* Prevent argument from being register allocated */
9331 arg_array [i]->flags |= MONO_INST_VOLATILE;
9332 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9334 ins = (MonoInst*)call;
9335 ins->inst_p0 = cmethod;
9336 ins->inst_p1 = arg_array [0];
9337 MONO_ADD_INS (cfg->cbb, ins);
9338 link_bblock (cfg, cfg->cbb, end_bblock);
9339 start_new_bblock = 1;
9341 // FIXME: Eliminate unreachable epilogs
9344 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9345 * only reachable from this call.
9347 GET_BBLOCK (cfg, tblock, ip + 5);
9348 if (tblock == cfg->cbb || tblock->in_count == 0)
9357 * Virtual calls in llvm-only mode.
9359 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9360 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9365 if (!(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9366 INLINE_FAILURE ("call");
9367 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9368 imt_arg, vtable_arg);
9370 if (tail_call && !cfg->llvm_only) {
9371 link_bblock (cfg, cfg->cbb, end_bblock);
9372 start_new_bblock = 1;
9374 // FIXME: Eliminate unreachable epilogs
9377 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9378 * only reachable from this call.
9380 GET_BBLOCK (cfg, tblock, ip + 5);
9381 if (tblock == cfg->cbb || tblock->in_count == 0)
9388 /* End of call, INS should contain the result of the call, if any */
9390 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9393 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9398 if (keep_this_alive) {
9399 MonoInst *dummy_use;
9401 /* See mono_emit_method_call_full () */
9402 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9405 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9407 * Clang can convert these calls to tail calls which screw up the stack
9408 * walk. This happens even when the -fno-optimize-sibling-calls
9409 * option is passed to clang.
9410 * Work around this by emitting a dummy call.
9412 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9415 CHECK_CFG_EXCEPTION;
9419 g_assert (*ip == CEE_RET);
9423 constrained_class = NULL;
9425 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9429 if (cfg->method != method) {
9430 /* return from inlined method */
9432 * If in_count == 0, that means the ret is unreachable due to
9433 * being preceeded by a throw. In that case, inline_method () will
9434 * handle setting the return value
9435 * (test case: test_0_inline_throw ()).
9437 if (return_var && cfg->cbb->in_count) {
9438 MonoType *ret_type = mono_method_signature (method)->ret;
9444 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9447 //g_assert (returnvar != -1);
9448 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9449 cfg->ret_var_set = TRUE;
9452 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9454 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9458 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9460 if (seq_points && !sym_seq_points) {
9462 * Place a seq point here too even through the IL stack is not
9463 * empty, so a step over on
9466 * will work correctly.
9468 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9469 MONO_ADD_INS (cfg->cbb, ins);
9472 g_assert (!return_var);
9476 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9479 emit_setret (cfg, *sp);
9482 if (sp != stack_start)
9484 MONO_INST_NEW (cfg, ins, OP_BR);
9486 ins->inst_target_bb = end_bblock;
9487 MONO_ADD_INS (cfg->cbb, ins);
9488 link_bblock (cfg, cfg->cbb, end_bblock);
9489 start_new_bblock = 1;
9493 MONO_INST_NEW (cfg, ins, OP_BR);
9495 target = ip + 1 + (signed char)(*ip);
9497 GET_BBLOCK (cfg, tblock, target);
9498 link_bblock (cfg, cfg->cbb, tblock);
9499 ins->inst_target_bb = tblock;
9500 if (sp != stack_start) {
9501 handle_stack_args (cfg, stack_start, sp - stack_start);
9503 CHECK_UNVERIFIABLE (cfg);
9505 MONO_ADD_INS (cfg->cbb, ins);
9506 start_new_bblock = 1;
9507 inline_costs += BRANCH_COST;
9521 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9523 target = ip + 1 + *(signed char*)ip;
9529 inline_costs += BRANCH_COST;
9533 MONO_INST_NEW (cfg, ins, OP_BR);
9536 target = ip + 4 + (gint32)read32(ip);
9538 GET_BBLOCK (cfg, tblock, target);
9539 link_bblock (cfg, cfg->cbb, tblock);
9540 ins->inst_target_bb = tblock;
9541 if (sp != stack_start) {
9542 handle_stack_args (cfg, stack_start, sp - stack_start);
9544 CHECK_UNVERIFIABLE (cfg);
9547 MONO_ADD_INS (cfg->cbb, ins);
9549 start_new_bblock = 1;
9550 inline_costs += BRANCH_COST;
9557 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9558 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9559 guint32 opsize = is_short ? 1 : 4;
9561 CHECK_OPSIZE (opsize);
9563 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9566 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9571 GET_BBLOCK (cfg, tblock, target);
9572 link_bblock (cfg, cfg->cbb, tblock);
9573 GET_BBLOCK (cfg, tblock, ip);
9574 link_bblock (cfg, cfg->cbb, tblock);
9576 if (sp != stack_start) {
9577 handle_stack_args (cfg, stack_start, sp - stack_start);
9578 CHECK_UNVERIFIABLE (cfg);
9581 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9582 cmp->sreg1 = sp [0]->dreg;
9583 type_from_op (cfg, cmp, sp [0], NULL);
9586 #if SIZEOF_REGISTER == 4
9587 if (cmp->opcode == OP_LCOMPARE_IMM) {
9588 /* Convert it to OP_LCOMPARE */
9589 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9590 ins->type = STACK_I8;
9591 ins->dreg = alloc_dreg (cfg, STACK_I8);
9593 MONO_ADD_INS (cfg->cbb, ins);
9594 cmp->opcode = OP_LCOMPARE;
9595 cmp->sreg2 = ins->dreg;
9598 MONO_ADD_INS (cfg->cbb, cmp);
9600 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9601 type_from_op (cfg, ins, sp [0], NULL);
9602 MONO_ADD_INS (cfg->cbb, ins);
9603 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9604 GET_BBLOCK (cfg, tblock, target);
9605 ins->inst_true_bb = tblock;
9606 GET_BBLOCK (cfg, tblock, ip);
9607 ins->inst_false_bb = tblock;
9608 start_new_bblock = 2;
9611 inline_costs += BRANCH_COST;
9626 MONO_INST_NEW (cfg, ins, *ip);
9628 target = ip + 4 + (gint32)read32(ip);
9634 inline_costs += BRANCH_COST;
9638 MonoBasicBlock **targets;
9639 MonoBasicBlock *default_bblock;
9640 MonoJumpInfoBBTable *table;
9641 int offset_reg = alloc_preg (cfg);
9642 int target_reg = alloc_preg (cfg);
9643 int table_reg = alloc_preg (cfg);
9644 int sum_reg = alloc_preg (cfg);
9645 gboolean use_op_switch;
9649 n = read32 (ip + 1);
9652 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9656 CHECK_OPSIZE (n * sizeof (guint32));
9657 target = ip + n * sizeof (guint32);
9659 GET_BBLOCK (cfg, default_bblock, target);
9660 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9662 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9663 for (i = 0; i < n; ++i) {
9664 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9665 targets [i] = tblock;
9666 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9670 if (sp != stack_start) {
9672 * Link the current bb with the targets as well, so handle_stack_args
9673 * will set their in_stack correctly.
9675 link_bblock (cfg, cfg->cbb, default_bblock);
9676 for (i = 0; i < n; ++i)
9677 link_bblock (cfg, cfg->cbb, targets [i]);
9679 handle_stack_args (cfg, stack_start, sp - stack_start);
9681 CHECK_UNVERIFIABLE (cfg);
9683 /* Undo the links */
9684 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9685 for (i = 0; i < n; ++i)
9686 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9690 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9692 for (i = 0; i < n; ++i)
9693 link_bblock (cfg, cfg->cbb, targets [i]);
9695 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9696 table->table = targets;
9697 table->table_size = n;
9699 use_op_switch = FALSE;
9701 /* ARM implements SWITCH statements differently */
9702 /* FIXME: Make it use the generic implementation */
9703 if (!cfg->compile_aot)
9704 use_op_switch = TRUE;
9707 if (COMPILE_LLVM (cfg))
9708 use_op_switch = TRUE;
9710 cfg->cbb->has_jump_table = 1;
9712 if (use_op_switch) {
9713 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9714 ins->sreg1 = src1->dreg;
9715 ins->inst_p0 = table;
9716 ins->inst_many_bb = targets;
9717 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9718 MONO_ADD_INS (cfg->cbb, ins);
9720 if (sizeof (gpointer) == 8)
9721 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9725 #if SIZEOF_REGISTER == 8
9726 /* The upper word might not be zero, and we add it to a 64 bit address later */
9727 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9730 if (cfg->compile_aot) {
9731 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9733 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9734 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9735 ins->inst_p0 = table;
9736 ins->dreg = table_reg;
9737 MONO_ADD_INS (cfg->cbb, ins);
9740 /* FIXME: Use load_memindex */
9741 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9743 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9745 start_new_bblock = 1;
9746 inline_costs += (BRANCH_COST * 2);
9766 dreg = alloc_freg (cfg);
9769 dreg = alloc_lreg (cfg);
9772 dreg = alloc_ireg_ref (cfg);
9775 dreg = alloc_preg (cfg);
9778 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9779 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9780 if (*ip == CEE_LDIND_R4)
9781 ins->type = cfg->r4_stack_type;
9782 ins->flags |= ins_flag;
9783 MONO_ADD_INS (cfg->cbb, ins);
9785 if (ins_flag & MONO_INST_VOLATILE) {
9786 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9787 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9803 if (ins_flag & MONO_INST_VOLATILE) {
9804 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9805 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9808 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9809 ins->flags |= ins_flag;
9812 MONO_ADD_INS (cfg->cbb, ins);
9814 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9815 emit_write_barrier (cfg, sp [0], sp [1]);
9824 MONO_INST_NEW (cfg, ins, (*ip));
9826 ins->sreg1 = sp [0]->dreg;
9827 ins->sreg2 = sp [1]->dreg;
9828 type_from_op (cfg, ins, sp [0], sp [1]);
9830 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9832 /* Use the immediate opcodes if possible */
9833 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9834 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9835 if (imm_opcode != -1) {
9836 ins->opcode = imm_opcode;
9837 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9840 NULLIFY_INS (sp [1]);
9844 MONO_ADD_INS ((cfg)->cbb, (ins));
9846 *sp++ = mono_decompose_opcode (cfg, ins);
9863 MONO_INST_NEW (cfg, ins, (*ip));
9865 ins->sreg1 = sp [0]->dreg;
9866 ins->sreg2 = sp [1]->dreg;
9867 type_from_op (cfg, ins, sp [0], sp [1]);
9869 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9870 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9872 /* FIXME: Pass opcode to is_inst_imm */
9874 /* Use the immediate opcodes if possible */
9875 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9876 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9877 if (imm_opcode != -1) {
9878 ins->opcode = imm_opcode;
9879 if (sp [1]->opcode == OP_I8CONST) {
9880 #if SIZEOF_REGISTER == 8
9881 ins->inst_imm = sp [1]->inst_l;
9883 ins->inst_ls_word = sp [1]->inst_ls_word;
9884 ins->inst_ms_word = sp [1]->inst_ms_word;
9888 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9891 /* Might be followed by an instruction added by add_widen_op */
9892 if (sp [1]->next == NULL)
9893 NULLIFY_INS (sp [1]);
9896 MONO_ADD_INS ((cfg)->cbb, (ins));
9898 *sp++ = mono_decompose_opcode (cfg, ins);
9911 case CEE_CONV_OVF_I8:
9912 case CEE_CONV_OVF_U8:
9916 /* Special case this earlier so we have long constants in the IR */
9917 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9918 int data = sp [-1]->inst_c0;
9919 sp [-1]->opcode = OP_I8CONST;
9920 sp [-1]->type = STACK_I8;
9921 #if SIZEOF_REGISTER == 8
9922 if ((*ip) == CEE_CONV_U8)
9923 sp [-1]->inst_c0 = (guint32)data;
9925 sp [-1]->inst_c0 = data;
9927 sp [-1]->inst_ls_word = data;
9928 if ((*ip) == CEE_CONV_U8)
9929 sp [-1]->inst_ms_word = 0;
9931 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9933 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9940 case CEE_CONV_OVF_I4:
9941 case CEE_CONV_OVF_I1:
9942 case CEE_CONV_OVF_I2:
9943 case CEE_CONV_OVF_I:
9944 case CEE_CONV_OVF_U:
9947 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9948 ADD_UNOP (CEE_CONV_OVF_I8);
9955 case CEE_CONV_OVF_U1:
9956 case CEE_CONV_OVF_U2:
9957 case CEE_CONV_OVF_U4:
9960 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9961 ADD_UNOP (CEE_CONV_OVF_U8);
9968 case CEE_CONV_OVF_I1_UN:
9969 case CEE_CONV_OVF_I2_UN:
9970 case CEE_CONV_OVF_I4_UN:
9971 case CEE_CONV_OVF_I8_UN:
9972 case CEE_CONV_OVF_U1_UN:
9973 case CEE_CONV_OVF_U2_UN:
9974 case CEE_CONV_OVF_U4_UN:
9975 case CEE_CONV_OVF_U8_UN:
9976 case CEE_CONV_OVF_I_UN:
9977 case CEE_CONV_OVF_U_UN:
9984 CHECK_CFG_EXCEPTION;
9988 case CEE_ADD_OVF_UN:
9990 case CEE_MUL_OVF_UN:
9992 case CEE_SUB_OVF_UN:
9998 GSHAREDVT_FAILURE (*ip);
10001 token = read32 (ip + 1);
10002 klass = mini_get_class (method, token, generic_context);
10003 CHECK_TYPELOAD (klass);
10005 if (generic_class_is_reference_type (cfg, klass)) {
10006 MonoInst *store, *load;
10007 int dreg = alloc_ireg_ref (cfg);
10009 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10010 load->flags |= ins_flag;
10011 MONO_ADD_INS (cfg->cbb, load);
10013 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10014 store->flags |= ins_flag;
10015 MONO_ADD_INS (cfg->cbb, store);
10017 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10018 emit_write_barrier (cfg, sp [0], sp [1]);
10020 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10026 int loc_index = -1;
10032 token = read32 (ip + 1);
10033 klass = mini_get_class (method, token, generic_context);
10034 CHECK_TYPELOAD (klass);
10036 /* Optimize the common ldobj+stloc combination */
10039 loc_index = ip [6];
10046 loc_index = ip [5] - CEE_STLOC_0;
10053 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10054 CHECK_LOCAL (loc_index);
10056 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10057 ins->dreg = cfg->locals [loc_index]->dreg;
10058 ins->flags |= ins_flag;
10061 if (ins_flag & MONO_INST_VOLATILE) {
10062 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10063 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10069 /* Optimize the ldobj+stobj combination */
10070 /* The reference case ends up being a load+store anyway */
10071 /* Skip this if the operation is volatile. */
10072 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10077 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10084 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10085 ins->flags |= ins_flag;
10088 if (ins_flag & MONO_INST_VOLATILE) {
10089 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10090 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10099 CHECK_STACK_OVF (1);
10101 n = read32 (ip + 1);
10103 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10104 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10105 ins->type = STACK_OBJ;
10108 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10109 MonoInst *iargs [1];
10110 char *str = (char *)mono_method_get_wrapper_data (method, n);
10112 if (cfg->compile_aot)
10113 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10115 EMIT_NEW_PCONST (cfg, iargs [0], str);
10116 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10118 if (cfg->opt & MONO_OPT_SHARED) {
10119 MonoInst *iargs [3];
10121 if (cfg->compile_aot) {
10122 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10124 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10125 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10126 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10127 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10128 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10131 if (cfg->cbb->out_of_line) {
10132 MonoInst *iargs [2];
10134 if (image == mono_defaults.corlib) {
10136 * Avoid relocations in AOT and save some space by using a
10137 * version of helper_ldstr specialized to mscorlib.
10139 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10140 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10142 /* Avoid creating the string object */
10143 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10144 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10145 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10149 if (cfg->compile_aot) {
10150 NEW_LDSTRCONST (cfg, ins, image, n);
10152 MONO_ADD_INS (cfg->cbb, ins);
10155 NEW_PCONST (cfg, ins, NULL);
10156 ins->type = STACK_OBJ;
10157 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10161 OUT_OF_MEMORY_FAILURE;
10164 MONO_ADD_INS (cfg->cbb, ins);
10173 MonoInst *iargs [2];
10174 MonoMethodSignature *fsig;
10177 MonoInst *vtable_arg = NULL;
10180 token = read32 (ip + 1);
10181 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10184 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10187 mono_save_token_info (cfg, image, token, cmethod);
10189 if (!mono_class_init (cmethod->klass))
10190 TYPE_LOAD_ERROR (cmethod->klass);
10192 context_used = mini_method_check_context_used (cfg, cmethod);
10194 if (mono_security_core_clr_enabled ())
10195 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10197 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10198 emit_class_init (cfg, cmethod->klass);
10199 CHECK_TYPELOAD (cmethod->klass);
10203 if (cfg->gsharedvt) {
10204 if (mini_is_gsharedvt_variable_signature (sig))
10205 GSHAREDVT_FAILURE (*ip);
10209 n = fsig->param_count;
10213 * Generate smaller code for the common newobj <exception> instruction in
10214 * argument checking code.
10216 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10217 is_exception_class (cmethod->klass) && n <= 2 &&
10218 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10219 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10220 MonoInst *iargs [3];
10224 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10227 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10230 iargs [1] = sp [0];
10231 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10234 iargs [1] = sp [0];
10235 iargs [2] = sp [1];
10236 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10239 g_assert_not_reached ();
10247 /* move the args to allow room for 'this' in the first position */
10253 /* check_call_signature () requires sp[0] to be set */
10254 this_ins.type = STACK_OBJ;
10255 sp [0] = &this_ins;
10256 if (check_call_signature (cfg, fsig, sp))
10261 if (mini_class_is_system_array (cmethod->klass)) {
10262 *sp = emit_get_rgctx_method (cfg, context_used,
10263 cmethod, MONO_RGCTX_INFO_METHOD);
10265 /* Avoid varargs in the common case */
10266 if (fsig->param_count == 1)
10267 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10268 else if (fsig->param_count == 2)
10269 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10270 else if (fsig->param_count == 3)
10271 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10272 else if (fsig->param_count == 4)
10273 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10275 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10276 } else if (cmethod->string_ctor) {
10277 g_assert (!context_used);
10278 g_assert (!vtable_arg);
10279 /* we simply pass a null pointer */
10280 EMIT_NEW_PCONST (cfg, *sp, NULL);
10281 /* now call the string ctor */
10282 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10284 if (cmethod->klass->valuetype) {
10285 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10286 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10287 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10292 * The code generated by mini_emit_virtual_call () expects
10293 * iargs [0] to be a boxed instance, but luckily the vcall
10294 * will be transformed into a normal call there.
10296 } else if (context_used) {
10297 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10300 MonoVTable *vtable = NULL;
10302 if (!cfg->compile_aot)
10303 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10304 CHECK_TYPELOAD (cmethod->klass);
10307 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10308 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10309 * As a workaround, we call class cctors before allocating objects.
10311 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10312 emit_class_init (cfg, cmethod->klass);
10313 if (cfg->verbose_level > 2)
10314 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10315 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10318 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10321 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10324 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10326 /* Now call the actual ctor */
10327 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10328 CHECK_CFG_EXCEPTION;
10331 if (alloc == NULL) {
10333 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10334 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10342 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10343 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10346 case CEE_CASTCLASS:
10351 token = read32 (ip + 1);
10352 klass = mini_get_class (method, token, generic_context);
10353 CHECK_TYPELOAD (klass);
10354 if (sp [0]->type != STACK_OBJ)
10357 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10358 ins->dreg = alloc_preg (cfg);
10359 ins->sreg1 = (*sp)->dreg;
10360 ins->klass = klass;
10361 ins->type = STACK_OBJ;
10362 MONO_ADD_INS (cfg->cbb, ins);
10364 CHECK_CFG_EXCEPTION;
10368 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10371 case CEE_UNBOX_ANY: {
10372 MonoInst *res, *addr;
10377 token = read32 (ip + 1);
10378 klass = mini_get_class (method, token, generic_context);
10379 CHECK_TYPELOAD (klass);
10381 mono_save_token_info (cfg, image, token, klass);
10383 context_used = mini_class_check_context_used (cfg, klass);
10385 if (mini_is_gsharedvt_klass (klass)) {
10386 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10388 } else if (generic_class_is_reference_type (cfg, klass)) {
10389 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10390 EMIT_NEW_PCONST (cfg, res, NULL);
10391 res->type = STACK_OBJ;
10393 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10394 res->dreg = alloc_preg (cfg);
10395 res->sreg1 = (*sp)->dreg;
10396 res->klass = klass;
10397 res->type = STACK_OBJ;
10398 MONO_ADD_INS (cfg->cbb, res);
10399 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10401 } else if (mono_class_is_nullable (klass)) {
10402 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10404 addr = handle_unbox (cfg, klass, sp, context_used);
10406 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10417 MonoClass *enum_class;
10418 MonoMethod *has_flag;
10424 token = read32 (ip + 1);
10425 klass = mini_get_class (method, token, generic_context);
10426 CHECK_TYPELOAD (klass);
10428 mono_save_token_info (cfg, image, token, klass);
10430 context_used = mini_class_check_context_used (cfg, klass);
10432 if (generic_class_is_reference_type (cfg, klass)) {
10438 if (klass == mono_defaults.void_class)
10440 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10442 /* frequent check in generic code: box (struct), brtrue */
10447 * <push int/long ptr>
10450 * constrained. MyFlags
10451 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10453 * If we find this sequence and the operand types on box and constrained
10454 * are equal, we can emit a specialized instruction sequence instead of
10455 * the very slow HasFlag () call.
10457 if ((cfg->opt & MONO_OPT_INTRINS) &&
10458 /* Cheap checks first. */
10459 ip + 5 + 6 + 5 < end &&
10460 ip [5] == CEE_PREFIX1 &&
10461 ip [6] == CEE_CONSTRAINED_ &&
10462 ip [11] == CEE_CALLVIRT &&
10463 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10464 mono_class_is_enum (klass) &&
10465 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10466 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10467 has_flag->klass == mono_defaults.enum_class &&
10468 !strcmp (has_flag->name, "HasFlag") &&
10469 has_flag->signature->hasthis &&
10470 has_flag->signature->param_count == 1) {
10471 CHECK_TYPELOAD (enum_class);
10473 if (enum_class == klass) {
10474 MonoInst *enum_this, *enum_flag;
10479 enum_this = sp [0];
10480 enum_flag = sp [1];
10482 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10487 // FIXME: LLVM can't handle the inconsistent bb linking
10488 if (!mono_class_is_nullable (klass) &&
10489 !mini_is_gsharedvt_klass (klass) &&
10490 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10491 (ip [5] == CEE_BRTRUE ||
10492 ip [5] == CEE_BRTRUE_S ||
10493 ip [5] == CEE_BRFALSE ||
10494 ip [5] == CEE_BRFALSE_S)) {
10495 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10497 MonoBasicBlock *true_bb, *false_bb;
10501 if (cfg->verbose_level > 3) {
10502 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10503 printf ("<box+brtrue opt>\n");
10508 case CEE_BRFALSE_S:
10511 target = ip + 1 + (signed char)(*ip);
10518 target = ip + 4 + (gint)(read32 (ip));
10522 g_assert_not_reached ();
10526 * We need to link both bblocks, since it is needed for handling stack
10527 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10528 * Branching to only one of them would lead to inconsistencies, so
10529 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10531 GET_BBLOCK (cfg, true_bb, target);
10532 GET_BBLOCK (cfg, false_bb, ip);
10534 mono_link_bblock (cfg, cfg->cbb, true_bb);
10535 mono_link_bblock (cfg, cfg->cbb, false_bb);
10537 if (sp != stack_start) {
10538 handle_stack_args (cfg, stack_start, sp - stack_start);
10540 CHECK_UNVERIFIABLE (cfg);
10543 if (COMPILE_LLVM (cfg)) {
10544 dreg = alloc_ireg (cfg);
10545 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10546 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10548 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10550 /* The JIT can't eliminate the iconst+compare */
10551 MONO_INST_NEW (cfg, ins, OP_BR);
10552 ins->inst_target_bb = is_true ? true_bb : false_bb;
10553 MONO_ADD_INS (cfg->cbb, ins);
10556 start_new_bblock = 1;
10560 *sp++ = handle_box (cfg, val, klass, context_used);
10562 CHECK_CFG_EXCEPTION;
10571 token = read32 (ip + 1);
10572 klass = mini_get_class (method, token, generic_context);
10573 CHECK_TYPELOAD (klass);
10575 mono_save_token_info (cfg, image, token, klass);
10577 context_used = mini_class_check_context_used (cfg, klass);
10579 if (mono_class_is_nullable (klass)) {
10582 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10583 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10587 ins = handle_unbox (cfg, klass, sp, context_used);
10600 MonoClassField *field;
10601 #ifndef DISABLE_REMOTING
10605 gboolean is_instance;
10607 gpointer addr = NULL;
10608 gboolean is_special_static;
10610 MonoInst *store_val = NULL;
10611 MonoInst *thread_ins;
10614 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10616 if (op == CEE_STFLD) {
10619 store_val = sp [1];
10624 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10626 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10629 if (op == CEE_STSFLD) {
10632 store_val = sp [0];
10637 token = read32 (ip + 1);
10638 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10639 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10640 klass = field->parent;
10643 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10646 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10647 FIELD_ACCESS_FAILURE (method, field);
10648 mono_class_init (klass);
10650 /* if the class is Critical then transparent code cannot access it's fields */
10651 if (!is_instance && mono_security_core_clr_enabled ())
10652 ensure_method_is_allowed_to_access_field (cfg, method, field);
10654 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10655 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10656 if (mono_security_core_clr_enabled ())
10657 ensure_method_is_allowed_to_access_field (cfg, method, field);
10660 ftype = mono_field_get_type (field);
10663 * LDFLD etc. is usable on static fields as well, so convert those cases to
10666 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10678 g_assert_not_reached ();
10680 is_instance = FALSE;
10683 context_used = mini_class_check_context_used (cfg, klass);
10685 /* INSTANCE CASE */
10687 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10688 if (op == CEE_STFLD) {
10689 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10691 #ifndef DISABLE_REMOTING
10692 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10693 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10694 MonoInst *iargs [5];
10696 GSHAREDVT_FAILURE (op);
10698 iargs [0] = sp [0];
10699 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10700 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10701 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10703 iargs [4] = sp [1];
10705 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10706 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10707 iargs, ip, cfg->real_offset, TRUE);
10708 CHECK_CFG_EXCEPTION;
10709 g_assert (costs > 0);
10711 cfg->real_offset += 5;
10713 inline_costs += costs;
10715 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10720 MonoInst *store, *wbarrier_ptr_ins = NULL;
10722 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10724 if (ins_flag & MONO_INST_VOLATILE) {
10725 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10726 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10729 if (mini_is_gsharedvt_klass (klass)) {
10730 MonoInst *offset_ins;
10732 context_used = mini_class_check_context_used (cfg, klass);
10734 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10735 /* The value is offset by 1 */
10736 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10737 dreg = alloc_ireg_mp (cfg);
10738 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10739 wbarrier_ptr_ins = ins;
10740 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10741 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10743 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10745 if (sp [0]->opcode != OP_LDADDR)
10746 store->flags |= MONO_INST_FAULT;
10748 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10749 if (mini_is_gsharedvt_klass (klass)) {
10750 g_assert (wbarrier_ptr_ins);
10751 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10753 /* insert call to write barrier */
10757 dreg = alloc_ireg_mp (cfg);
10758 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10759 emit_write_barrier (cfg, ptr, sp [1]);
10763 store->flags |= ins_flag;
10770 #ifndef DISABLE_REMOTING
10771 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10772 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10773 MonoInst *iargs [4];
10775 GSHAREDVT_FAILURE (op);
10777 iargs [0] = sp [0];
10778 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10779 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10780 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10781 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10782 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10783 iargs, ip, cfg->real_offset, TRUE);
10784 CHECK_CFG_EXCEPTION;
10785 g_assert (costs > 0);
10787 cfg->real_offset += 5;
10791 inline_costs += costs;
10793 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10799 if (sp [0]->type == STACK_VTYPE) {
10802 /* Have to compute the address of the variable */
10804 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10806 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10808 g_assert (var->klass == klass);
10810 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10814 if (op == CEE_LDFLDA) {
10815 if (sp [0]->type == STACK_OBJ) {
10816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10817 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10820 dreg = alloc_ireg_mp (cfg);
10822 if (mini_is_gsharedvt_klass (klass)) {
10823 MonoInst *offset_ins;
10825 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10826 /* The value is offset by 1 */
10827 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10828 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10830 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10832 ins->klass = mono_class_from_mono_type (field->type);
10833 ins->type = STACK_MP;
10838 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10840 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10841 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10850 if (mini_is_gsharedvt_klass (klass)) {
10851 MonoInst *offset_ins;
10853 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10854 /* The value is offset by 1 */
10855 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10856 dreg = alloc_ireg_mp (cfg);
10857 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10858 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10860 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10862 load->flags |= ins_flag;
10863 if (sp [0]->opcode != OP_LDADDR)
10864 load->flags |= MONO_INST_FAULT;
10876 context_used = mini_class_check_context_used (cfg, klass);
10878 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10879 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10883 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10884 * to be called here.
10886 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10887 mono_class_vtable (cfg->domain, klass);
10888 CHECK_TYPELOAD (klass);
10890 mono_domain_lock (cfg->domain);
10891 if (cfg->domain->special_static_fields)
10892 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10893 mono_domain_unlock (cfg->domain);
10895 is_special_static = mono_class_field_is_special_static (field);
10897 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10898 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10902 /* Generate IR to compute the field address */
10903 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10905 * Fast access to TLS data
10906 * Inline version of get_thread_static_data () in
10910 int idx, static_data_reg, array_reg, dreg;
10912 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10913 GSHAREDVT_FAILURE (op);
10915 static_data_reg = alloc_ireg (cfg);
10916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10918 if (cfg->compile_aot) {
10919 int offset_reg, offset2_reg, idx_reg;
10921 /* For TLS variables, this will return the TLS offset */
10922 EMIT_NEW_SFLDACONST (cfg, ins, field);
10923 offset_reg = ins->dreg;
10924 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10925 idx_reg = alloc_ireg (cfg);
10926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10927 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10928 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10929 array_reg = alloc_ireg (cfg);
10930 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10931 offset2_reg = alloc_ireg (cfg);
10932 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10933 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10934 dreg = alloc_ireg (cfg);
10935 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10937 offset = (gsize)addr & 0x7fffffff;
10938 idx = offset & 0x3f;
10940 array_reg = alloc_ireg (cfg);
10941 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10942 dreg = alloc_ireg (cfg);
10943 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10945 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10946 (cfg->compile_aot && is_special_static) ||
10947 (context_used && is_special_static)) {
10948 MonoInst *iargs [2];
10950 g_assert (field->parent);
10951 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10952 if (context_used) {
10953 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10954 field, MONO_RGCTX_INFO_CLASS_FIELD);
10956 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10958 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10959 } else if (context_used) {
10960 MonoInst *static_data;
10963 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10964 method->klass->name_space, method->klass->name, method->name,
10965 depth, field->offset);
10968 if (mono_class_needs_cctor_run (klass, method))
10969 emit_class_init (cfg, klass);
10972 * The pointer we're computing here is
10974 * super_info.static_data + field->offset
10976 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10977 klass, MONO_RGCTX_INFO_STATIC_DATA);
10979 if (mini_is_gsharedvt_klass (klass)) {
10980 MonoInst *offset_ins;
10982 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10983 /* The value is offset by 1 */
10984 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10985 dreg = alloc_ireg_mp (cfg);
10986 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10987 } else if (field->offset == 0) {
10990 int addr_reg = mono_alloc_preg (cfg);
10991 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10993 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10994 MonoInst *iargs [2];
10996 g_assert (field->parent);
10997 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10998 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10999 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11001 MonoVTable *vtable = NULL;
11003 if (!cfg->compile_aot)
11004 vtable = mono_class_vtable (cfg->domain, klass);
11005 CHECK_TYPELOAD (klass);
11008 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11009 if (!(g_slist_find (class_inits, klass))) {
11010 emit_class_init (cfg, klass);
11011 if (cfg->verbose_level > 2)
11012 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11013 class_inits = g_slist_prepend (class_inits, klass);
11016 if (cfg->run_cctors) {
11017 /* This makes so that inline cannot trigger */
11018 /* .cctors: too many apps depend on them */
11019 /* running with a specific order... */
11021 if (! vtable->initialized)
11022 INLINE_FAILURE ("class init");
11023 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11024 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11025 goto exception_exit;
11029 if (cfg->compile_aot)
11030 EMIT_NEW_SFLDACONST (cfg, ins, field);
11033 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11035 EMIT_NEW_PCONST (cfg, ins, addr);
11038 MonoInst *iargs [1];
11039 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11040 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11044 /* Generate IR to do the actual load/store operation */
11046 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11047 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11048 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11051 if (op == CEE_LDSFLDA) {
11052 ins->klass = mono_class_from_mono_type (ftype);
11053 ins->type = STACK_PTR;
11055 } else if (op == CEE_STSFLD) {
11058 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11059 store->flags |= ins_flag;
11061 gboolean is_const = FALSE;
11062 MonoVTable *vtable = NULL;
11063 gpointer addr = NULL;
11065 if (!context_used) {
11066 vtable = mono_class_vtable (cfg->domain, klass);
11067 CHECK_TYPELOAD (klass);
11069 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11070 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11071 int ro_type = ftype->type;
11073 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11074 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11075 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11078 GSHAREDVT_FAILURE (op);
11080 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11083 case MONO_TYPE_BOOLEAN:
11085 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11089 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11092 case MONO_TYPE_CHAR:
11094 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11098 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11103 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11107 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11112 case MONO_TYPE_PTR:
11113 case MONO_TYPE_FNPTR:
11114 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11115 type_to_eval_stack_type ((cfg), field->type, *sp);
11118 case MONO_TYPE_STRING:
11119 case MONO_TYPE_OBJECT:
11120 case MONO_TYPE_CLASS:
11121 case MONO_TYPE_SZARRAY:
11122 case MONO_TYPE_ARRAY:
11123 if (!mono_gc_is_moving ()) {
11124 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11125 type_to_eval_stack_type ((cfg), field->type, *sp);
11133 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11138 case MONO_TYPE_VALUETYPE:
11148 CHECK_STACK_OVF (1);
11150 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11151 load->flags |= ins_flag;
11157 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11158 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11159 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11170 token = read32 (ip + 1);
11171 klass = mini_get_class (method, token, generic_context);
11172 CHECK_TYPELOAD (klass);
11173 if (ins_flag & MONO_INST_VOLATILE) {
11174 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11175 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11177 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11178 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11179 ins->flags |= ins_flag;
11180 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11181 generic_class_is_reference_type (cfg, klass) && !MONO_INS_IS_PCONST_NULL (sp [1])) {
11182 /* insert call to write barrier */
11183 emit_write_barrier (cfg, sp [0], sp [1]);
11195 const char *data_ptr;
11197 guint32 field_token;
11203 token = read32 (ip + 1);
11205 klass = mini_get_class (method, token, generic_context);
11206 CHECK_TYPELOAD (klass);
11208 context_used = mini_class_check_context_used (cfg, klass);
11210 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11211 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11212 ins->sreg1 = sp [0]->dreg;
11213 ins->type = STACK_I4;
11214 ins->dreg = alloc_ireg (cfg);
11215 MONO_ADD_INS (cfg->cbb, ins);
11216 *sp = mono_decompose_opcode (cfg, ins);
11219 if (context_used) {
11220 MonoInst *args [3];
11221 MonoClass *array_class = mono_array_class_get (klass, 1);
11222 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11224 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11227 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11228 array_class, MONO_RGCTX_INFO_VTABLE);
11233 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11235 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11237 if (cfg->opt & MONO_OPT_SHARED) {
11238 /* Decompose now to avoid problems with references to the domainvar */
11239 MonoInst *iargs [3];
11241 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11242 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11243 iargs [2] = sp [0];
11245 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11247 /* Decompose later since it is needed by abcrem */
11248 MonoClass *array_type = mono_array_class_get (klass, 1);
11249 mono_class_vtable (cfg->domain, array_type);
11250 CHECK_TYPELOAD (array_type);
11252 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11253 ins->dreg = alloc_ireg_ref (cfg);
11254 ins->sreg1 = sp [0]->dreg;
11255 ins->inst_newa_class = klass;
11256 ins->type = STACK_OBJ;
11257 ins->klass = array_type;
11258 MONO_ADD_INS (cfg->cbb, ins);
11259 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11260 cfg->cbb->has_array_access = TRUE;
11262 /* Needed so mono_emit_load_get_addr () gets called */
11263 mono_get_got_var (cfg);
11273 * we inline/optimize the initialization sequence if possible.
11274 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11275 * for small sizes open code the memcpy
11276 * ensure the rva field is big enough
11278 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11279 MonoMethod *memcpy_method = get_memcpy_method ();
11280 MonoInst *iargs [3];
11281 int add_reg = alloc_ireg_mp (cfg);
11283 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11284 if (cfg->compile_aot) {
11285 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11287 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11289 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11290 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11299 if (sp [0]->type != STACK_OBJ)
11302 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11303 ins->dreg = alloc_preg (cfg);
11304 ins->sreg1 = sp [0]->dreg;
11305 ins->type = STACK_I4;
11306 /* This flag will be inherited by the decomposition */
11307 ins->flags |= MONO_INST_FAULT;
11308 MONO_ADD_INS (cfg->cbb, ins);
11309 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11310 cfg->cbb->has_array_access = TRUE;
11318 if (sp [0]->type != STACK_OBJ)
11321 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11323 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11324 CHECK_TYPELOAD (klass);
11325 /* we need to make sure that this array is exactly the type it needs
11326 * to be for correctness. the wrappers are lax with their usage
11327 * so we need to ignore them here
11329 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11330 MonoClass *array_class = mono_array_class_get (klass, 1);
11331 mini_emit_check_array_type (cfg, sp [0], array_class);
11332 CHECK_TYPELOAD (array_class);
11336 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11341 case CEE_LDELEM_I1:
11342 case CEE_LDELEM_U1:
11343 case CEE_LDELEM_I2:
11344 case CEE_LDELEM_U2:
11345 case CEE_LDELEM_I4:
11346 case CEE_LDELEM_U4:
11347 case CEE_LDELEM_I8:
11349 case CEE_LDELEM_R4:
11350 case CEE_LDELEM_R8:
11351 case CEE_LDELEM_REF: {
11357 if (*ip == CEE_LDELEM) {
11359 token = read32 (ip + 1);
11360 klass = mini_get_class (method, token, generic_context);
11361 CHECK_TYPELOAD (klass);
11362 mono_class_init (klass);
11365 klass = array_access_to_klass (*ip);
11367 if (sp [0]->type != STACK_OBJ)
11370 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11372 if (mini_is_gsharedvt_variable_klass (klass)) {
11373 // FIXME-VT: OP_ICONST optimization
11374 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11375 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11376 ins->opcode = OP_LOADV_MEMBASE;
11377 } else if (sp [1]->opcode == OP_ICONST) {
11378 int array_reg = sp [0]->dreg;
11379 int index_reg = sp [1]->dreg;
11380 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11382 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11383 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11385 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11386 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11388 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11389 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11392 if (*ip == CEE_LDELEM)
11399 case CEE_STELEM_I1:
11400 case CEE_STELEM_I2:
11401 case CEE_STELEM_I4:
11402 case CEE_STELEM_I8:
11403 case CEE_STELEM_R4:
11404 case CEE_STELEM_R8:
11405 case CEE_STELEM_REF:
11410 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11412 if (*ip == CEE_STELEM) {
11414 token = read32 (ip + 1);
11415 klass = mini_get_class (method, token, generic_context);
11416 CHECK_TYPELOAD (klass);
11417 mono_class_init (klass);
11420 klass = array_access_to_klass (*ip);
11422 if (sp [0]->type != STACK_OBJ)
11425 emit_array_store (cfg, klass, sp, TRUE);
11427 if (*ip == CEE_STELEM)
11434 case CEE_CKFINITE: {
11438 if (cfg->llvm_only) {
11439 MonoInst *iargs [1];
11441 iargs [0] = sp [0];
11442 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11444 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11445 ins->sreg1 = sp [0]->dreg;
11446 ins->dreg = alloc_freg (cfg);
11447 ins->type = STACK_R8;
11448 MONO_ADD_INS (cfg->cbb, ins);
11450 *sp++ = mono_decompose_opcode (cfg, ins);
11456 case CEE_REFANYVAL: {
11457 MonoInst *src_var, *src;
11459 int klass_reg = alloc_preg (cfg);
11460 int dreg = alloc_preg (cfg);
11462 GSHAREDVT_FAILURE (*ip);
11465 MONO_INST_NEW (cfg, ins, *ip);
11468 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11469 CHECK_TYPELOAD (klass);
11471 context_used = mini_class_check_context_used (cfg, klass);
11474 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11476 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11477 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11480 if (context_used) {
11481 MonoInst *klass_ins;
11483 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11484 klass, MONO_RGCTX_INFO_KLASS);
11487 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11488 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11490 mini_emit_class_check (cfg, klass_reg, klass);
11492 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11493 ins->type = STACK_MP;
11494 ins->klass = klass;
11499 case CEE_MKREFANY: {
11500 MonoInst *loc, *addr;
11502 GSHAREDVT_FAILURE (*ip);
11505 MONO_INST_NEW (cfg, ins, *ip);
11508 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11509 CHECK_TYPELOAD (klass);
11511 context_used = mini_class_check_context_used (cfg, klass);
11513 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11514 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11516 if (context_used) {
11517 MonoInst *const_ins;
11518 int type_reg = alloc_preg (cfg);
11520 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11521 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11522 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11523 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11525 int const_reg = alloc_preg (cfg);
11526 int type_reg = alloc_preg (cfg);
11528 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11529 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11530 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11531 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11533 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11535 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11536 ins->type = STACK_VTYPE;
11537 ins->klass = mono_defaults.typed_reference_class;
11542 case CEE_LDTOKEN: {
11544 MonoClass *handle_class;
11546 CHECK_STACK_OVF (1);
11549 n = read32 (ip + 1);
11551 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11552 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11553 handle = mono_method_get_wrapper_data (method, n);
11554 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11555 if (handle_class == mono_defaults.typehandle_class)
11556 handle = &((MonoClass*)handle)->byval_arg;
11559 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11564 mono_class_init (handle_class);
11565 if (cfg->gshared) {
11566 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11567 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11568 /* This case handles ldtoken
11569 of an open type, like for
11572 } else if (handle_class == mono_defaults.typehandle_class) {
11573 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11574 } else if (handle_class == mono_defaults.fieldhandle_class)
11575 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11576 else if (handle_class == mono_defaults.methodhandle_class)
11577 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11579 g_assert_not_reached ();
11582 if ((cfg->opt & MONO_OPT_SHARED) &&
11583 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11584 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11585 MonoInst *addr, *vtvar, *iargs [3];
11586 int method_context_used;
11588 method_context_used = mini_method_check_context_used (cfg, method);
11590 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11592 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11593 EMIT_NEW_ICONST (cfg, iargs [1], n);
11594 if (method_context_used) {
11595 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11596 method, MONO_RGCTX_INFO_METHOD);
11597 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11599 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11600 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11602 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11604 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11606 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11608 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11609 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11610 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11611 (cmethod->klass == mono_defaults.systemtype_class) &&
11612 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11613 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11615 mono_class_init (tclass);
11616 if (context_used) {
11617 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11618 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11619 } else if (cfg->compile_aot) {
11620 if (method->wrapper_type) {
11621 error_init (&error); //got to do it since there are multiple conditionals below
11622 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11623 /* Special case for static synchronized wrappers */
11624 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11626 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11627 /* FIXME: n is not a normal token */
11629 EMIT_NEW_PCONST (cfg, ins, NULL);
11632 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11635 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11637 EMIT_NEW_PCONST (cfg, ins, rt);
11639 ins->type = STACK_OBJ;
11640 ins->klass = cmethod->klass;
11643 MonoInst *addr, *vtvar;
11645 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11647 if (context_used) {
11648 if (handle_class == mono_defaults.typehandle_class) {
11649 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11650 mono_class_from_mono_type ((MonoType *)handle),
11651 MONO_RGCTX_INFO_TYPE);
11652 } else if (handle_class == mono_defaults.methodhandle_class) {
11653 ins = emit_get_rgctx_method (cfg, context_used,
11654 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11655 } else if (handle_class == mono_defaults.fieldhandle_class) {
11656 ins = emit_get_rgctx_field (cfg, context_used,
11657 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11659 g_assert_not_reached ();
11661 } else if (cfg->compile_aot) {
11662 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11664 EMIT_NEW_PCONST (cfg, ins, handle);
11666 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11668 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11678 if (sp [-1]->type != STACK_OBJ)
11681 MONO_INST_NEW (cfg, ins, OP_THROW);
11683 ins->sreg1 = sp [0]->dreg;
11685 cfg->cbb->out_of_line = TRUE;
11686 MONO_ADD_INS (cfg->cbb, ins);
11687 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11688 MONO_ADD_INS (cfg->cbb, ins);
11691 link_bblock (cfg, cfg->cbb, end_bblock);
11692 start_new_bblock = 1;
11693 /* This can complicate code generation for llvm since the return value might not be defined */
11694 if (COMPILE_LLVM (cfg))
11695 INLINE_FAILURE ("throw");
11697 case CEE_ENDFINALLY:
11698 if (!ip_in_finally_clause (cfg, ip - header->code))
11700 /* mono_save_seq_point_info () depends on this */
11701 if (sp != stack_start)
11702 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11703 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11704 MONO_ADD_INS (cfg->cbb, ins);
11706 start_new_bblock = 1;
11709 * Control will leave the method so empty the stack, otherwise
11710 * the next basic block will start with a nonempty stack.
11712 while (sp != stack_start) {
11717 case CEE_LEAVE_S: {
11720 if (*ip == CEE_LEAVE) {
11722 target = ip + 5 + (gint32)read32(ip + 1);
11725 target = ip + 2 + (signed char)(ip [1]);
11728 /* empty the stack */
11729 while (sp != stack_start) {
11734 * If this leave statement is in a catch block, check for a
11735 * pending exception, and rethrow it if necessary.
11736 * We avoid doing this in runtime invoke wrappers, since those are called
11737 * by native code which expects the wrapper to catch all exceptions.
11739 for (i = 0; i < header->num_clauses; ++i) {
11740 MonoExceptionClause *clause = &header->clauses [i];
11743 * Use <= in the final comparison to handle clauses with multiple
11744 * leave statements, like in bug #78024.
11745 * The ordering of the exception clauses guarantees that we find the
11746 * innermost clause.
11748 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11750 MonoBasicBlock *dont_throw;
11755 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11758 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11760 NEW_BBLOCK (cfg, dont_throw);
11763 * Currently, we always rethrow the abort exception, despite the
11764 * fact that this is not correct. See thread6.cs for an example.
11765 * But propagating the abort exception is more important than
11766 * getting the semantics right.
11768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11769 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11770 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11772 MONO_START_BB (cfg, dont_throw);
11777 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11780 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11782 MonoExceptionClause *clause;
11784 for (tmp = handlers; tmp; tmp = tmp->next) {
11785 clause = (MonoExceptionClause *)tmp->data;
11786 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11788 link_bblock (cfg, cfg->cbb, tblock);
11789 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11790 ins->inst_target_bb = tblock;
11791 ins->inst_eh_block = clause;
11792 MONO_ADD_INS (cfg->cbb, ins);
11793 cfg->cbb->has_call_handler = 1;
11794 if (COMPILE_LLVM (cfg)) {
11795 MonoBasicBlock *target_bb;
11798 * Link the finally bblock with the target, since it will
11799 * conceptually branch there.
11801 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11802 GET_BBLOCK (cfg, target_bb, target);
11803 link_bblock (cfg, tblock, target_bb);
11806 g_list_free (handlers);
11809 MONO_INST_NEW (cfg, ins, OP_BR);
11810 MONO_ADD_INS (cfg->cbb, ins);
11811 GET_BBLOCK (cfg, tblock, target);
11812 link_bblock (cfg, cfg->cbb, tblock);
11813 ins->inst_target_bb = tblock;
11815 start_new_bblock = 1;
11817 if (*ip == CEE_LEAVE)
11826 * Mono specific opcodes
11828 case MONO_CUSTOM_PREFIX: {
11830 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11834 case CEE_MONO_ICALL: {
11836 MonoJitICallInfo *info;
11838 token = read32 (ip + 2);
11839 func = mono_method_get_wrapper_data (method, token);
11840 info = mono_find_jit_icall_by_addr (func);
11842 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11845 CHECK_STACK (info->sig->param_count);
11846 sp -= info->sig->param_count;
11848 ins = mono_emit_jit_icall (cfg, info->func, sp);
11849 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11853 inline_costs += 10 * num_calls++;
11857 case CEE_MONO_LDPTR_CARD_TABLE:
11858 case CEE_MONO_LDPTR_NURSERY_START:
11859 case CEE_MONO_LDPTR_NURSERY_BITS:
11860 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11861 CHECK_STACK_OVF (1);
11864 case CEE_MONO_LDPTR_CARD_TABLE:
11865 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11867 case CEE_MONO_LDPTR_NURSERY_START:
11868 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11870 case CEE_MONO_LDPTR_NURSERY_BITS:
11871 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11873 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11874 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11880 inline_costs += 10 * num_calls++;
11883 case CEE_MONO_LDPTR: {
11886 CHECK_STACK_OVF (1);
11888 token = read32 (ip + 2);
11890 ptr = mono_method_get_wrapper_data (method, token);
11891 EMIT_NEW_PCONST (cfg, ins, ptr);
11894 inline_costs += 10 * num_calls++;
11895 /* Can't embed random pointers into AOT code */
11899 case CEE_MONO_JIT_ICALL_ADDR: {
11900 MonoJitICallInfo *callinfo;
11903 CHECK_STACK_OVF (1);
11905 token = read32 (ip + 2);
11907 ptr = mono_method_get_wrapper_data (method, token);
11908 callinfo = mono_find_jit_icall_by_addr (ptr);
11909 g_assert (callinfo);
11910 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11913 inline_costs += 10 * num_calls++;
11916 case CEE_MONO_ICALL_ADDR: {
11917 MonoMethod *cmethod;
11920 CHECK_STACK_OVF (1);
11922 token = read32 (ip + 2);
11924 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11926 if (cfg->compile_aot) {
11927 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11929 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11930 * before the call, it's not needed when using direct pinvoke.
11931 * This is not an optimization, but it's used to avoid looking up pinvokes
11932 * on platforms which don't support dlopen ().
11934 EMIT_NEW_PCONST (cfg, ins, NULL);
11936 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11939 ptr = mono_lookup_internal_call (cmethod);
11941 EMIT_NEW_PCONST (cfg, ins, ptr);
11947 case CEE_MONO_VTADDR: {
11948 MonoInst *src_var, *src;
11954 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11955 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11960 case CEE_MONO_NEWOBJ: {
11961 MonoInst *iargs [2];
11963 CHECK_STACK_OVF (1);
11965 token = read32 (ip + 2);
11966 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11967 mono_class_init (klass);
11968 NEW_DOMAINCONST (cfg, iargs [0]);
11969 MONO_ADD_INS (cfg->cbb, iargs [0]);
11970 NEW_CLASSCONST (cfg, iargs [1], klass);
11971 MONO_ADD_INS (cfg->cbb, iargs [1]);
11972 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11974 inline_costs += 10 * num_calls++;
11977 case CEE_MONO_OBJADDR:
11980 MONO_INST_NEW (cfg, ins, OP_MOVE);
11981 ins->dreg = alloc_ireg_mp (cfg);
11982 ins->sreg1 = sp [0]->dreg;
11983 ins->type = STACK_MP;
11984 MONO_ADD_INS (cfg->cbb, ins);
11988 case CEE_MONO_LDNATIVEOBJ:
11990 * Similar to LDOBJ, but instead load the unmanaged
11991 * representation of the vtype to the stack.
11996 token = read32 (ip + 2);
11997 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11998 g_assert (klass->valuetype);
11999 mono_class_init (klass);
12002 MonoInst *src, *dest, *temp;
12005 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12006 temp->backend.is_pinvoke = 1;
12007 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12008 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12010 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12011 dest->type = STACK_VTYPE;
12012 dest->klass = klass;
12018 case CEE_MONO_RETOBJ: {
12020 * Same as RET, but return the native representation of a vtype
12023 g_assert (cfg->ret);
12024 g_assert (mono_method_signature (method)->pinvoke);
12029 token = read32 (ip + 2);
12030 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12032 if (!cfg->vret_addr) {
12033 g_assert (cfg->ret_var_is_local);
12035 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12037 EMIT_NEW_RETLOADA (cfg, ins);
12039 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12041 if (sp != stack_start)
12044 MONO_INST_NEW (cfg, ins, OP_BR);
12045 ins->inst_target_bb = end_bblock;
12046 MONO_ADD_INS (cfg->cbb, ins);
12047 link_bblock (cfg, cfg->cbb, end_bblock);
12048 start_new_bblock = 1;
12052 case CEE_MONO_SAVE_LMF:
12053 case CEE_MONO_RESTORE_LMF:
12056 case CEE_MONO_CLASSCONST:
12057 CHECK_STACK_OVF (1);
12059 token = read32 (ip + 2);
12060 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12063 inline_costs += 10 * num_calls++;
12065 case CEE_MONO_NOT_TAKEN:
12066 cfg->cbb->out_of_line = TRUE;
12069 case CEE_MONO_TLS: {
12072 CHECK_STACK_OVF (1);
12074 key = (MonoTlsKey)read32 (ip + 2);
12075 g_assert (key < TLS_KEY_NUM);
12077 ins = mono_create_tls_get (cfg, key);
12079 ins->type = STACK_PTR;
12084 case CEE_MONO_DYN_CALL: {
12085 MonoCallInst *call;
12087 /* It would be easier to call a trampoline, but that would put an
12088 * extra frame on the stack, confusing exception handling. So
12089 * implement it inline using an opcode for now.
12092 if (!cfg->dyn_call_var) {
12093 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12094 /* prevent it from being register allocated */
12095 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12098 /* Has to use a call inst since the local regalloc expects it */
12099 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12100 ins = (MonoInst*)call;
12102 ins->sreg1 = sp [0]->dreg;
12103 ins->sreg2 = sp [1]->dreg;
12104 MONO_ADD_INS (cfg->cbb, ins);
12106 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12109 inline_costs += 10 * num_calls++;
12113 case CEE_MONO_MEMORY_BARRIER: {
12115 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12119 case CEE_MONO_ATOMIC_STORE_I4: {
12120 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12126 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12127 ins->dreg = sp [0]->dreg;
12128 ins->sreg1 = sp [1]->dreg;
12129 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12130 MONO_ADD_INS (cfg->cbb, ins);
12135 case CEE_MONO_JIT_ATTACH: {
12136 MonoInst *args [16], *domain_ins;
12137 MonoInst *ad_ins, *jit_tls_ins;
12138 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12140 g_assert (!mono_threads_is_coop_enabled ());
12142 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12144 EMIT_NEW_PCONST (cfg, ins, NULL);
12145 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12147 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12148 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
12150 if (ad_ins && jit_tls_ins) {
12151 NEW_BBLOCK (cfg, next_bb);
12152 NEW_BBLOCK (cfg, call_bb);
12154 if (cfg->compile_aot) {
12155 /* AOT code is only used in the root domain */
12156 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12158 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12160 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12161 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12163 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12164 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12167 MONO_START_BB (cfg, call_bb);
12170 /* AOT code is only used in the root domain */
12171 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12172 if (cfg->compile_aot) {
12176 * This is called on unattached threads, so it cannot go through the trampoline
12177 * infrastructure. Use an indirect call through a got slot initialized at load time
12180 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12181 ins = mono_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12183 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12185 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12188 MONO_START_BB (cfg, next_bb);
12193 case CEE_MONO_JIT_DETACH: {
12194 MonoInst *args [16];
12196 /* Restore the original domain */
12197 dreg = alloc_ireg (cfg);
12198 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12199 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12203 case CEE_MONO_CALLI_EXTRA_ARG: {
12205 MonoMethodSignature *fsig;
12209 * This is the same as CEE_CALLI, but passes an additional argument
12210 * to the called method in llvmonly mode.
12211 * This is only used by delegate invoke wrappers to call the
12212 * actual delegate method.
12214 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12217 token = read32 (ip + 2);
12225 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12228 if (cfg->llvm_only)
12229 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12231 n = fsig->param_count + fsig->hasthis + 1;
12238 if (cfg->llvm_only) {
12240 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12241 * cconv. This is set by mono_init_delegate ().
12243 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12244 MonoInst *callee = addr;
12245 MonoInst *call, *localloc_ins;
12246 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12247 int low_bit_reg = alloc_preg (cfg);
12249 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12250 NEW_BBLOCK (cfg, end_bb);
12252 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12253 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12254 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12256 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12257 addr = emit_get_rgctx_sig (cfg, context_used,
12258 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12260 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12262 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12263 ins->dreg = alloc_preg (cfg);
12264 ins->inst_imm = 2 * SIZEOF_VOID_P;
12265 MONO_ADD_INS (cfg->cbb, ins);
12266 localloc_ins = ins;
12267 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12268 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12269 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12271 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12272 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12274 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12275 MONO_START_BB (cfg, is_gsharedvt_bb);
12276 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12277 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12278 ins->dreg = call->dreg;
12280 MONO_START_BB (cfg, end_bb);
12282 /* Caller uses a normal calling conv */
12284 MonoInst *callee = addr;
12285 MonoInst *call, *localloc_ins;
12286 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12287 int low_bit_reg = alloc_preg (cfg);
12289 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12290 NEW_BBLOCK (cfg, end_bb);
12292 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12293 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12294 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12296 /* Normal case: callee uses a normal cconv, no conversion is needed */
12297 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12298 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12299 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12300 MONO_START_BB (cfg, is_gsharedvt_bb);
12301 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12302 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12303 MONO_ADD_INS (cfg->cbb, addr);
12305 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12307 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12308 ins->dreg = alloc_preg (cfg);
12309 ins->inst_imm = 2 * SIZEOF_VOID_P;
12310 MONO_ADD_INS (cfg->cbb, ins);
12311 localloc_ins = ins;
12312 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12313 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12314 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12316 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12317 ins->dreg = call->dreg;
12318 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12320 MONO_START_BB (cfg, end_bb);
12323 /* Same as CEE_CALLI */
12324 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12326 * We pass the address to the gsharedvt trampoline in the rgctx reg
12328 MonoInst *callee = addr;
12330 addr = emit_get_rgctx_sig (cfg, context_used,
12331 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12332 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12334 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12338 if (!MONO_TYPE_IS_VOID (fsig->ret))
12339 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12341 CHECK_CFG_EXCEPTION;
12345 constrained_class = NULL;
12348 case CEE_MONO_LDDOMAIN:
12349 CHECK_STACK_OVF (1);
12350 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12354 case CEE_MONO_GET_LAST_ERROR:
12356 CHECK_STACK_OVF (1);
12358 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12359 ins->dreg = alloc_dreg (cfg, STACK_I4);
12360 ins->type = STACK_I4;
12361 MONO_ADD_INS (cfg->cbb, ins);
12367 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12373 case CEE_PREFIX1: {
12376 case CEE_ARGLIST: {
12377 /* somewhat similar to LDTOKEN */
12378 MonoInst *addr, *vtvar;
12379 CHECK_STACK_OVF (1);
12380 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12382 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12383 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12385 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12386 ins->type = STACK_VTYPE;
12387 ins->klass = mono_defaults.argumenthandle_class;
12397 MonoInst *cmp, *arg1, *arg2;
12405 * The following transforms:
12406 * CEE_CEQ into OP_CEQ
12407 * CEE_CGT into OP_CGT
12408 * CEE_CGT_UN into OP_CGT_UN
12409 * CEE_CLT into OP_CLT
12410 * CEE_CLT_UN into OP_CLT_UN
12412 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12414 MONO_INST_NEW (cfg, ins, cmp->opcode);
12415 cmp->sreg1 = arg1->dreg;
12416 cmp->sreg2 = arg2->dreg;
12417 type_from_op (cfg, cmp, arg1, arg2);
12419 add_widen_op (cfg, cmp, &arg1, &arg2);
12420 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12421 cmp->opcode = OP_LCOMPARE;
12422 else if (arg1->type == STACK_R4)
12423 cmp->opcode = OP_RCOMPARE;
12424 else if (arg1->type == STACK_R8)
12425 cmp->opcode = OP_FCOMPARE;
12427 cmp->opcode = OP_ICOMPARE;
12428 MONO_ADD_INS (cfg->cbb, cmp);
12429 ins->type = STACK_I4;
12430 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12431 type_from_op (cfg, ins, arg1, arg2);
12433 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12435 * The backends expect the fceq opcodes to do the
12438 ins->sreg1 = cmp->sreg1;
12439 ins->sreg2 = cmp->sreg2;
12442 MONO_ADD_INS (cfg->cbb, ins);
12448 MonoInst *argconst;
12449 MonoMethod *cil_method;
12451 CHECK_STACK_OVF (1);
12453 n = read32 (ip + 2);
12454 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12457 mono_class_init (cmethod->klass);
12459 mono_save_token_info (cfg, image, n, cmethod);
12461 context_used = mini_method_check_context_used (cfg, cmethod);
12463 cil_method = cmethod;
12464 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12465 emit_method_access_failure (cfg, method, cil_method);
12467 if (mono_security_core_clr_enabled ())
12468 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12471 * Optimize the common case of ldftn+delegate creation
12473 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12474 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12475 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12476 MonoInst *target_ins, *handle_ins;
12477 MonoMethod *invoke;
12478 int invoke_context_used;
12480 invoke = mono_get_delegate_invoke (ctor_method->klass);
12481 if (!invoke || !mono_method_signature (invoke))
12484 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12486 target_ins = sp [-1];
12488 if (mono_security_core_clr_enabled ())
12489 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12491 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12492 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12493 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12495 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12499 /* FIXME: SGEN support */
12500 if (invoke_context_used == 0 || cfg->llvm_only) {
12502 if (cfg->verbose_level > 3)
12503 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12504 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12507 CHECK_CFG_EXCEPTION;
12517 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12518 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12522 inline_costs += 10 * num_calls++;
12525 case CEE_LDVIRTFTN: {
12526 MonoInst *args [2];
12530 n = read32 (ip + 2);
12531 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12534 mono_class_init (cmethod->klass);
12536 context_used = mini_method_check_context_used (cfg, cmethod);
12538 if (mono_security_core_clr_enabled ())
12539 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12542 * Optimize the common case of ldvirtftn+delegate creation
12544 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12545 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12546 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12547 MonoInst *target_ins, *handle_ins;
12548 MonoMethod *invoke;
12549 int invoke_context_used;
12550 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12552 invoke = mono_get_delegate_invoke (ctor_method->klass);
12553 if (!invoke || !mono_method_signature (invoke))
12556 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12558 target_ins = sp [-1];
12560 if (mono_security_core_clr_enabled ())
12561 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12563 /* FIXME: SGEN support */
12564 if (invoke_context_used == 0 || cfg->llvm_only) {
12566 if (cfg->verbose_level > 3)
12567 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12568 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12571 CHECK_CFG_EXCEPTION;
12584 args [1] = emit_get_rgctx_method (cfg, context_used,
12585 cmethod, MONO_RGCTX_INFO_METHOD);
12588 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12590 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12593 inline_costs += 10 * num_calls++;
12597 CHECK_STACK_OVF (1);
12599 n = read16 (ip + 2);
12601 EMIT_NEW_ARGLOAD (cfg, ins, n);
12606 CHECK_STACK_OVF (1);
12608 n = read16 (ip + 2);
12610 NEW_ARGLOADA (cfg, ins, n);
12611 MONO_ADD_INS (cfg->cbb, ins);
12619 n = read16 (ip + 2);
12621 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12623 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12627 CHECK_STACK_OVF (1);
12629 n = read16 (ip + 2);
12631 EMIT_NEW_LOCLOAD (cfg, ins, n);
12636 unsigned char *tmp_ip;
12637 CHECK_STACK_OVF (1);
12639 n = read16 (ip + 2);
12642 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12648 EMIT_NEW_LOCLOADA (cfg, ins, n);
12657 n = read16 (ip + 2);
12659 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12661 emit_stloc_ir (cfg, sp, header, n);
12665 case CEE_LOCALLOC: {
12667 MonoBasicBlock *non_zero_bb, *end_bb;
12668 int alloc_ptr = alloc_preg (cfg);
12670 if (sp != stack_start)
12672 if (cfg->method != method)
12674 * Inlining this into a loop in a parent could lead to
12675 * stack overflows which is different behavior than the
12676 * non-inlined case, thus disable inlining in this case.
12678 INLINE_FAILURE("localloc");
12680 NEW_BBLOCK (cfg, non_zero_bb);
12681 NEW_BBLOCK (cfg, end_bb);
12683 /* if size != zero */
12684 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12687 //size is zero, so result is NULL
12688 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12691 MONO_START_BB (cfg, non_zero_bb);
12692 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12693 ins->dreg = alloc_ptr;
12694 ins->sreg1 = sp [0]->dreg;
12695 ins->type = STACK_PTR;
12696 MONO_ADD_INS (cfg->cbb, ins);
12698 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12700 ins->flags |= MONO_INST_INIT;
12702 MONO_START_BB (cfg, end_bb);
12703 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12704 ins->type = STACK_PTR;
12710 case CEE_ENDFILTER: {
12711 MonoExceptionClause *clause, *nearest;
12716 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12718 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12719 ins->sreg1 = (*sp)->dreg;
12720 MONO_ADD_INS (cfg->cbb, ins);
12721 start_new_bblock = 1;
12725 for (cc = 0; cc < header->num_clauses; ++cc) {
12726 clause = &header->clauses [cc];
12727 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12728 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12729 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12732 g_assert (nearest);
12733 if ((ip - header->code) != nearest->handler_offset)
12738 case CEE_UNALIGNED_:
12739 ins_flag |= MONO_INST_UNALIGNED;
12740 /* FIXME: record alignment? we can assume 1 for now */
12744 case CEE_VOLATILE_:
12745 ins_flag |= MONO_INST_VOLATILE;
12749 ins_flag |= MONO_INST_TAILCALL;
12750 cfg->flags |= MONO_CFG_HAS_TAIL;
12751 /* Can't inline tail calls at this time */
12752 inline_costs += 100000;
12759 token = read32 (ip + 2);
12760 klass = mini_get_class (method, token, generic_context);
12761 CHECK_TYPELOAD (klass);
12762 if (generic_class_is_reference_type (cfg, klass))
12763 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12765 mini_emit_initobj (cfg, *sp, NULL, klass);
12769 case CEE_CONSTRAINED_:
12771 token = read32 (ip + 2);
12772 constrained_class = mini_get_class (method, token, generic_context);
12773 CHECK_TYPELOAD (constrained_class);
12777 case CEE_INITBLK: {
12778 MonoInst *iargs [3];
12782 /* Skip optimized paths for volatile operations. */
12783 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12784 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12785 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12786 /* emit_memset only works when val == 0 */
12787 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12790 iargs [0] = sp [0];
12791 iargs [1] = sp [1];
12792 iargs [2] = sp [2];
12793 if (ip [1] == CEE_CPBLK) {
12795 * FIXME: It's unclear whether we should be emitting both the acquire
12796 * and release barriers for cpblk. It is technically both a load and
12797 * store operation, so it seems like that's the sensible thing to do.
12799 * FIXME: We emit full barriers on both sides of the operation for
12800 * simplicity. We should have a separate atomic memcpy method instead.
12802 MonoMethod *memcpy_method = get_memcpy_method ();
12804 if (ins_flag & MONO_INST_VOLATILE)
12805 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12807 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12808 call->flags |= ins_flag;
12810 if (ins_flag & MONO_INST_VOLATILE)
12811 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12813 MonoMethod *memset_method = get_memset_method ();
12814 if (ins_flag & MONO_INST_VOLATILE) {
12815 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12816 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12818 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12819 call->flags |= ins_flag;
12830 ins_flag |= MONO_INST_NOTYPECHECK;
12832 ins_flag |= MONO_INST_NORANGECHECK;
12833 /* we ignore the no-nullcheck for now since we
12834 * really do it explicitly only when doing callvirt->call
12838 case CEE_RETHROW: {
12840 int handler_offset = -1;
12842 for (i = 0; i < header->num_clauses; ++i) {
12843 MonoExceptionClause *clause = &header->clauses [i];
12844 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12845 handler_offset = clause->handler_offset;
12850 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12852 if (handler_offset == -1)
12855 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12856 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12857 ins->sreg1 = load->dreg;
12858 MONO_ADD_INS (cfg->cbb, ins);
12860 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12861 MONO_ADD_INS (cfg->cbb, ins);
12864 link_bblock (cfg, cfg->cbb, end_bblock);
12865 start_new_bblock = 1;
12873 CHECK_STACK_OVF (1);
12875 token = read32 (ip + 2);
12876 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12877 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12880 val = mono_type_size (type, &ialign);
12882 MonoClass *klass = mini_get_class (method, token, generic_context);
12883 CHECK_TYPELOAD (klass);
12885 val = mono_type_size (&klass->byval_arg, &ialign);
12887 if (mini_is_gsharedvt_klass (klass))
12888 GSHAREDVT_FAILURE (*ip);
12890 EMIT_NEW_ICONST (cfg, ins, val);
12895 case CEE_REFANYTYPE: {
12896 MonoInst *src_var, *src;
12898 GSHAREDVT_FAILURE (*ip);
12904 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12906 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12907 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12908 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12913 case CEE_READONLY_:
12926 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12936 g_warning ("opcode 0x%02x not handled", *ip);
12940 if (start_new_bblock != 1)
12943 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12944 if (cfg->cbb->next_bb) {
12945 /* This could already be set because of inlining, #693905 */
12946 MonoBasicBlock *bb = cfg->cbb;
12948 while (bb->next_bb)
12950 bb->next_bb = end_bblock;
12952 cfg->cbb->next_bb = end_bblock;
12955 if (cfg->method == method && cfg->domainvar) {
12957 MonoInst *get_domain;
12959 cfg->cbb = init_localsbb;
12961 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12962 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12963 MONO_ADD_INS (cfg->cbb, store);
12966 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12967 if (cfg->compile_aot)
12968 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12969 mono_get_got_var (cfg);
12972 if (cfg->method == method && cfg->got_var)
12973 mono_emit_load_got_addr (cfg);
12975 if (init_localsbb) {
12976 cfg->cbb = init_localsbb;
12978 for (i = 0; i < header->num_locals; ++i) {
12979 emit_init_local (cfg, i, header->locals [i], init_locals);
12983 if (cfg->init_ref_vars && cfg->method == method) {
12984 /* Emit initialization for ref vars */
12985 // FIXME: Avoid duplication initialization for IL locals.
12986 for (i = 0; i < cfg->num_varinfo; ++i) {
12987 MonoInst *ins = cfg->varinfo [i];
12989 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12990 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12994 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12995 cfg->cbb = init_localsbb;
12996 emit_push_lmf (cfg);
12999 cfg->cbb = init_localsbb;
13000 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13003 MonoBasicBlock *bb;
13006 * Make seq points at backward branch targets interruptable.
13008 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13009 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13010 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13013 /* Add a sequence point for method entry/exit events */
13014 if (seq_points && cfg->gen_sdb_seq_points) {
13015 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13016 MONO_ADD_INS (init_localsbb, ins);
13017 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13018 MONO_ADD_INS (cfg->bb_exit, ins);
13022 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13023 * the code they refer to was dead (#11880).
13025 if (sym_seq_points) {
13026 for (i = 0; i < header->code_size; ++i) {
13027 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13030 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13031 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13038 if (cfg->method == method) {
13039 MonoBasicBlock *bb;
13040 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13041 if (bb == cfg->bb_init)
13044 bb->region = mono_find_block_region (cfg, bb->real_offset);
13046 mono_create_spvar_for_region (cfg, bb->region);
13047 if (cfg->verbose_level > 2)
13048 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13051 MonoBasicBlock *bb;
13052 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13053 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13054 bb->real_offset = inline_offset;
13058 if (inline_costs < 0) {
13061 /* Method is too large */
13062 mname = mono_method_full_name (method, TRUE);
13063 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13067 if ((cfg->verbose_level > 2) && (cfg->method == method))
13068 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13073 g_assert (!mono_error_ok (&cfg->error));
13077 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13081 set_exception_type_from_invalid_il (cfg, method, ip);
13085 g_slist_free (class_inits);
13086 mono_basic_block_free (original_bb);
13087 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13088 if (cfg->exception_type)
13091 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode (store a register to [basereg+offset])
 * to the corresponding *_MEMBASE_IMM opcode which stores an immediate
 * constant instead. Aborts on opcodes with no immediate form.
 */
13095 store_membase_reg_to_store_membase_imm (int opcode)
13098 case OP_STORE_MEMBASE_REG:
13099 return OP_STORE_MEMBASE_IMM;
13100 case OP_STOREI1_MEMBASE_REG:
13101 return OP_STOREI1_MEMBASE_IMM;
13102 case OP_STOREI2_MEMBASE_REG:
13103 return OP_STOREI2_MEMBASE_IMM;
13104 case OP_STOREI4_MEMBASE_REG:
13105 return OP_STOREI4_MEMBASE_IMM;
13106 case OP_STOREI8_MEMBASE_REG:
13107 return OP_STOREI8_MEMBASE_IMM;
/* Any other store opcode (e.g. the FP stores) has no immediate variant. */
13109 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate variant of OPCODE, i.e. the opcode which takes an
 * immediate constant as its second operand instead of a vreg. Used by the
 * local optimizer to fold constants into instructions.
 */
13116 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops */
13120 return OP_IADD_IMM;
13122 return OP_ISUB_IMM;
13124 return OP_IDIV_IMM;
13126 return OP_IDIV_UN_IMM;
13128 return OP_IREM_IMM;
13130 return OP_IREM_UN_IMM;
13132 return OP_IMUL_IMM;
13134 return OP_IAND_IMM;
13138 return OP_IXOR_IMM;
13140 return OP_ISHL_IMM;
13142 return OP_ISHR_IMM;
13144 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU ops */
13147 return OP_LADD_IMM;
13149 return OP_LSUB_IMM;
13151 return OP_LAND_IMM;
13155 return OP_LXOR_IMM;
13157 return OP_LSHL_IMM;
13159 return OP_LSHR_IMM;
13161 return OP_LSHR_UN_IMM;
/* LREM_IMM is only available on 64-bit hosts */
13162 #if SIZEOF_REGISTER == 8
13164 return OP_LREM_IMM;
/* Compares */
13168 return OP_COMPARE_IMM;
13170 return OP_ICOMPARE_IMM;
13172 return OP_LCOMPARE_IMM;
/* Membase stores: store an immediate instead of a register */
13174 case OP_STORE_MEMBASE_REG:
13175 return OP_STORE_MEMBASE_IMM;
13176 case OP_STOREI1_MEMBASE_REG:
13177 return OP_STOREI1_MEMBASE_IMM;
13178 case OP_STOREI2_MEMBASE_REG:
13179 return OP_STOREI2_MEMBASE_IMM;
13180 case OP_STOREI4_MEMBASE_REG:
13181 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes with immediate forms */
13183 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13185 return OP_X86_PUSH_IMM;
13186 case OP_X86_COMPARE_MEMBASE_REG:
13187 return OP_X86_COMPARE_MEMBASE_IMM;
13189 #if defined(TARGET_AMD64)
13190 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13191 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Calls through a register constant become direct calls */
13193 case OP_VOIDCALL_REG:
13194 return OP_VOIDCALL;
13202 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* opcode (load indirect) to the JIT's typed
 * OP_LOAD*_MEMBASE opcode. Aborts on an unexpected opcode.
 */
13209 ldind_to_load_membase (int opcode)
13213 return OP_LOADI1_MEMBASE;
13215 return OP_LOADU1_MEMBASE;
13217 return OP_LOADI2_MEMBASE;
13219 return OP_LOADU2_MEMBASE;
13221 return OP_LOADI4_MEMBASE;
13223 return OP_LOADU4_MEMBASE;
/* Native int and object reference both load a pointer-sized word. */
13225 return OP_LOAD_MEMBASE;
13226 case CEE_LDIND_REF:
13227 return OP_LOAD_MEMBASE;
13229 return OP_LOADI8_MEMBASE;
13231 return OP_LOADR4_MEMBASE;
13233 return OP_LOADR8_MEMBASE;
13235 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* opcode (store indirect) to the JIT's typed
 * OP_STORE*_MEMBASE_REG opcode. Aborts on an unexpected opcode.
 */
13242 stind_to_store_membase (int opcode)
13246 return OP_STOREI1_MEMBASE_REG;
13248 return OP_STOREI2_MEMBASE_REG;
13250 return OP_STOREI4_MEMBASE_REG;
/* Native int and object reference both store a pointer-sized word. */
13252 case CEE_STIND_REF:
13253 return OP_STORE_MEMBASE_REG;
13255 return OP_STOREI8_MEMBASE_REG;
13257 return OP_STORER4_MEMBASE_REG;
13259 return OP_STORER8_MEMBASE_REG;
13261 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from [basereg+offset]) to the
 * corresponding OP_LOAD*_MEM opcode which loads from an absolute address.
 * Only x86/amd64 provide these addressing modes.
 */
13268 mono_load_membase_to_load_mem (int opcode)
13270 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13271 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13273 case OP_LOAD_MEMBASE:
13274 return OP_LOAD_MEM;
13275 case OP_LOADU1_MEMBASE:
13276 return OP_LOADU1_MEM;
13277 case OP_LOADU2_MEMBASE:
13278 return OP_LOADU2_MEM;
13279 case OP_LOADI4_MEMBASE:
13280 return OP_LOADI4_MEM;
13281 case OP_LOADU4_MEMBASE:
13282 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist on 64-bit hosts */
13283 #if SIZEOF_REGISTER == 8
13284 case OP_LOADI8_MEMBASE:
13285 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU OPCODE whose result is immediately stored to memory with
 * STORE_OPCODE, return a combined read-modify-write opcode which operates
 * directly on [basereg+offset] (x86/amd64 only). The caller pattern-matches
 * "op; store" sequences in the local optimizer.
 */
13294 op_to_op_dest_membase (int store_opcode, int opcode)
13296 #if defined(TARGET_X86)
/* On x86 only pointer-sized / 32-bit stores can be fused. */
13297 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13302 return OP_X86_ADD_MEMBASE_REG;
13304 return OP_X86_SUB_MEMBASE_REG;
13306 return OP_X86_AND_MEMBASE_REG;
13308 return OP_X86_OR_MEMBASE_REG;
13310 return OP_X86_XOR_MEMBASE_REG;
13313 return OP_X86_ADD_MEMBASE_IMM;
13316 return OP_X86_SUB_MEMBASE_IMM;
13319 return OP_X86_AND_MEMBASE_IMM;
13322 return OP_X86_OR_MEMBASE_IMM;
13325 return OP_X86_XOR_MEMBASE_IMM;
13331 #if defined(TARGET_AMD64)
/* amd64 additionally allows fusing 64-bit stores. */
13332 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit forms reuse the X86_* opcodes ... */
13337 return OP_X86_ADD_MEMBASE_REG;
13339 return OP_X86_SUB_MEMBASE_REG;
13341 return OP_X86_AND_MEMBASE_REG;
13343 return OP_X86_OR_MEMBASE_REG;
13345 return OP_X86_XOR_MEMBASE_REG;
13347 return OP_X86_ADD_MEMBASE_IMM;
13349 return OP_X86_SUB_MEMBASE_IMM;
13351 return OP_X86_AND_MEMBASE_IMM;
13353 return OP_X86_OR_MEMBASE_IMM;
13355 return OP_X86_XOR_MEMBASE_IMM;
/* ... while 64-bit forms use the AMD64_* opcodes. */
13357 return OP_AMD64_ADD_MEMBASE_REG;
13359 return OP_AMD64_SUB_MEMBASE_REG;
13361 return OP_AMD64_AND_MEMBASE_REG;
13363 return OP_AMD64_OR_MEMBASE_REG;
13365 return OP_AMD64_XOR_MEMBASE_REG;
13368 return OP_AMD64_ADD_MEMBASE_IMM;
13371 return OP_AMD64_SUB_MEMBASE_IMM;
13374 return OP_AMD64_AND_MEMBASE_IMM;
13377 return OP_AMD64_OR_MEMBASE_IMM;
13380 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with a following 1-byte store:
 * on x86/amd64, SETcc can write its result straight to [basereg+offset].
 */
13390 op_to_op_store_membase (int store_opcode, int opcode)
13392 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13395 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13396 return OP_X86_SETEQ_MEMBASE;
13398 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13399 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given OPCODE whose first source operand was just produced by a memory
 * load of kind LOAD_OPCODE, return a fused opcode which reads the operand
 * directly from [basereg+offset] (x86/amd64 only). Returns a sentinel for
 * non-fusable combinations (handled by the elided fall-through paths).
 */
13407 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13410 /* FIXME: This has sign extension issues */
13412 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13413 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32-bit loads can be fused. */
13416 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13421 return OP_X86_PUSH_MEMBASE;
13422 case OP_COMPARE_IMM:
13423 case OP_ICOMPARE_IMM:
13424 return OP_X86_COMPARE_MEMBASE_IMM;
13427 return OP_X86_COMPARE_MEMBASE_REG;
13431 #ifdef TARGET_AMD64
13432 /* FIXME: This has sign extension issues */
13434 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13435 return OP_X86_COMPARE_MEMBASE8_IMM;
/* amd64: pointer-sized loads fold only when not ILP32 (x32 ABI). */
13440 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13441 return OP_X86_PUSH_MEMBASE;
13443 /* FIXME: This only works for 32 bit immediates
13444 case OP_COMPARE_IMM:
13445 case OP_LCOMPARE_IMM:
13446 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13447 return OP_AMD64_COMPARE_MEMBASE_IMM;
13449 case OP_ICOMPARE_IMM:
13450 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13451 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Pointer-sized compare: 32-bit variant under ILP32, 64-bit otherwise. */
13455 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13456 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13457 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13458 return OP_AMD64_COMPARE_MEMBASE_REG;
13461 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13462 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase (), but for OPCODE's second source operand:
 * return a fused reg,[basereg+offset] form of the op when the second operand
 * comes from a memory load of kind LOAD_OPCODE (x86/amd64 only).
 */
13471 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer-sized / 32-bit loads can be fused. */
13474 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13480 return OP_X86_COMPARE_REG_MEMBASE;
13482 return OP_X86_ADD_REG_MEMBASE;
13484 return OP_X86_SUB_REG_MEMBASE;
13486 return OP_X86_AND_REG_MEMBASE;
13488 return OP_X86_OR_REG_MEMBASE;
13490 return OP_X86_XOR_REG_MEMBASE;
13494 #ifdef TARGET_AMD64
/* amd64, 32-bit operand (or pointer-sized under ILP32): X86_*/AMD64_I* forms */
13495 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13498 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13500 return OP_X86_ADD_REG_MEMBASE;
13502 return OP_X86_SUB_REG_MEMBASE;
13504 return OP_X86_AND_REG_MEMBASE;
13506 return OP_X86_OR_REG_MEMBASE;
13508 return OP_X86_XOR_REG_MEMBASE;
/* amd64, 64-bit operand: AMD64_* forms */
13510 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13514 return OP_AMD64_COMPARE_REG_MEMBASE;
13516 return OP_AMD64_ADD_REG_MEMBASE;
13518 return OP_AMD64_SUB_REG_MEMBASE;
13520 return OP_AMD64_AND_REG_MEMBASE;
13522 return OP_AMD64_OR_REG_MEMBASE;
13524 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse to convert opcodes which are
 * software-emulated on this architecture (long shifts on 32-bit hosts,
 * mul/div/rem where MONO_ARCH_EMULATE_* is set), since the emulation
 * helpers take register operands only.
 */
13533 mono_op_to_op_imm_noemul (int opcode)
13536 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13542 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13549 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13554 return mono_op_to_op_imm (opcode);
13559 * mono_handle_global_vregs:
13561 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13565 mono_handle_global_vregs (MonoCompile *cfg)
13567 gint32 *vreg_to_bb;
13568 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, block_num+1 = seen in exactly that
 * bblock, -1 = seen in multiple bblocks.
 * NOTE(review): the element size here is sizeof (gint32*) (pointer size, not
 * gint32) and the "+ 1" is added to the byte count rather than the element
 * count. This over-allocates on 64-bit hosts (harmless) but looks like it
 * was meant to be sizeof (gint32) * (cfg->next_vreg + 1) — confirm.
 */
13571 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13573 #ifdef MONO_ARCH_SIMD_INTRINSICS
13574 if (cfg->uses_simd_intrinsics)
13575 mono_simd_simplify_indirection (cfg);
/* Pass 1: walk every instruction of every bblock and record, per vreg, whether it is referenced from more than one bblock. */
13578 /* Find local vregs used in more than one bb */
13579 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13580 MonoInst *ins = bb->code;
13581 int block_num = bb->block_num;
13583 if (cfg->verbose_level > 2)
13584 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13587 for (; ins; ins = ins->next) {
13588 const char *spec = INS_INFO (ins->opcode);
13589 int regtype = 0, regindex;
13592 if (G_UNLIKELY (cfg->verbose_level > 2))
13593 mono_print_ins (ins);
/* By this point all CIL-level opcodes must have been lowered to machine IR. */
13595 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1..3 = the three possible source registers. */
13597 for (regindex = 0; regindex < 4; regindex ++) {
13600 if (regindex == 0) {
13601 regtype = spec [MONO_INST_DEST];
13602 if (regtype == ' ')
13605 } else if (regindex == 1) {
13606 regtype = spec [MONO_INST_SRC1];
13607 if (regtype == ' ')
13610 } else if (regindex == 2) {
13611 regtype = spec [MONO_INST_SRC2];
13612 if (regtype == ' ')
13615 } else if (regindex == 3) {
13616 regtype = spec [MONO_INST_SRC3];
13617 if (regtype == ' ')
13622 #if SIZEOF_REGISTER == 4
13623 /* In the LLVM case, the long opcodes are not decomposed */
13624 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13626 * Since some instructions reference the original long vreg,
13627 * and some reference the two component vregs, it is quite hard
13628 * to determine when it needs to be global. So be conservative.
13630 if (!get_vreg_to_inst (cfg, vreg)) {
13631 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13633 if (cfg->verbose_level > 2)
13634 printf ("LONG VREG R%d made global.\n", vreg);
13638 * Make the component vregs volatile since the optimizations can
13639 * get confused otherwise.
13641 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13642 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13646 g_assert (vreg != -1);
13648 prev_bb = vreg_to_bb [vreg];
13649 if (prev_bb == 0) {
13650 /* 0 is a valid block num */
13651 vreg_to_bb [vreg] = block_num + 1;
13652 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are excluded from globalization. */
13653 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* First cross-bblock use: promote the vreg to a MonoInst variable of the matching type. */
13656 if (!get_vreg_to_inst (cfg, vreg)) {
13657 if (G_UNLIKELY (cfg->verbose_level > 2))
13658 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13662 if (vreg_is_ref (cfg, vreg))
13663 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13665 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13668 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13671 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13675 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13678 g_assert_not_reached ();
13682 /* Flag as having been used in more than one bb */
13683 vreg_to_bb [vreg] = -1;
/* Pass 2: demote single-bblock variables back to plain local vregs so the register allocator can handle them. */
13689 /* If a variable is used in only one bblock, convert it into a local vreg */
13690 for (i = 0; i < cfg->num_varinfo; i++) {
13691 MonoInst *var = cfg->varinfo [i];
13692 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13694 switch (var->type) {
13700 #if SIZEOF_REGISTER == 8
13703 #if !defined(TARGET_X86)
13704 /* Enabling this screws up the fp stack on x86 */
13707 if (mono_arch_is_soft_float ())
13711 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13715 /* Arguments are implicitly global */
13716 /* Putting R4 vars into registers doesn't work currently */
13717 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13718 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13720 * Make that the variable's liveness interval doesn't contain a call, since
13721 * that would cause the lvreg to be spilled, making the whole optimization
13724 /* This is too slow for JIT compilation */
13726 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13728 int def_index, call_index, ins_index;
13729 gboolean spilled = FALSE;
13734 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13735 const char *spec = INS_INFO (ins->opcode);
13737 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13738 def_index = ins_index;
/*
 * NOTE(review): the two clauses of this || are identical (both test
 * SRC1/sreg1). Presumably the second was meant to test SRC2/sreg2,
 * so SRC2 uses after a call are currently missed here — confirm
 * before relying on this path (the comment above suggests it may be
 * compiled out for JIT builds anyway).
 */
13740 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13741 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13742 if (call_index > def_index) {
13748 if (MONO_IS_CALL (ins))
13749 call_index = ins_index;
13759 if (G_UNLIKELY (cfg->verbose_level > 2))
13760 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead and drop the vreg->var mapping; the vreg stays usable as a local. */
13761 var->flags |= MONO_INST_IS_DEAD;
13762 cfg->vreg_to_inst [var->dreg] = NULL;
/* Pass 3: compact varinfo/vars, dropping entries flagged MONO_INST_IS_DEAD. */
13769 * Compress the varinfo and vars tables so the liveness computation is faster and
13770 * takes up less space.
13773 for (i = 0; i < cfg->num_varinfo; ++i) {
13774 MonoInst *var = cfg->varinfo [i];
13775 if (pos < i && cfg->locals_start == i)
13776 cfg->locals_start = pos;
13777 if (!(var->flags & MONO_INST_IS_DEAD)) {
13779 cfg->varinfo [pos] = cfg->varinfo [i];
13780 cfg->varinfo [pos]->inst_c0 = pos;
13781 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13782 cfg->vars [pos].idx = pos;
13783 #if SIZEOF_REGISTER == 4
13784 if (cfg->varinfo [pos]->type == STACK_I8) {
13785 /* Modify the two component vars too */
13788 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13789 var1->inst_c0 = pos;
13790 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13791 var1->inst_c0 = pos;
13798 cfg->num_varinfo = pos;
13799 if (cfg->locals_start > cfg->num_varinfo)
13800 cfg->locals_start = cfg->num_varinfo;
13804 * mono_allocate_gsharedvt_vars:
13806 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13807 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13810 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* Zero-initialized: an entry of 0 in gsharedvt_vreg_to_idx means "no gsharedvt slot". */
13814 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13816 for (i = 0; i < cfg->num_varinfo; ++i) {
13817 MonoInst *ins = cfg->varinfo [i];
13820 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals (i >= locals_start) get an rgctx info slot; the index is stored biased by +1 so 0 can keep meaning "unset". */
13821 if (i >= cfg->locals_start) {
13823 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13824 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13825 ins->opcode = OP_GSHAREDVT_LOCAL;
13826 ins->inst_imm = idx;
/* Arguments use the -1 sentinel and are addressed relative to their register/stack slot. */
13829 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13830 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13837 * mono_spill_global_vars:
13839 * Generate spill code for variables which are not allocated to registers,
13840 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13841 * code is generated which could be optimized by the local optimization passes.
13844 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13846 MonoBasicBlock *bb;
13848 int orig_next_vreg;
13849 guint32 *vreg_to_lvreg;
13851 guint32 i, lvregs_len, lvregs_size;
13852 gboolean dest_has_lvreg = FALSE;
13853 MonoStackType stacktypes [128];
13854 MonoInst **live_range_start, **live_range_end;
13855 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13857 *need_local_opts = FALSE;
13859 memset (spec2, 0, sizeof (spec2));
/* Map the single-character register-type codes used in instruction spec
 * strings to stack types, used when allocating replacement lvregs below. */
13861 /* FIXME: Move this function to mini.c */
13862 stacktypes ['i'] = STACK_PTR;
13863 stacktypes ['l'] = STACK_I8;
13864 stacktypes ['f'] = STACK_R8;
13865 #ifdef MONO_ARCH_SIMD_INTRINSICS
13866 stacktypes ['x'] = STACK_VTYPE;
13869 #if SIZEOF_REGISTER == 4
13870 /* Create MonoInsts for longs */
/* On 32-bit targets a 64-bit variable lives in two machine words; give the
 * low/high component vregs their own OP_REGOFFSET insts at the LS/MS word
 * offsets of the parent variable's stack slot. */
13871 for (i = 0; i < cfg->num_varinfo; i++) {
13872 MonoInst *ins = cfg->varinfo [i];
13874 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13875 switch (ins->type) {
13880 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13883 g_assert (ins->opcode == OP_REGOFFSET);
13885 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13887 tree->opcode = OP_REGOFFSET;
13888 tree->inst_basereg = ins->inst_basereg;
13889 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13891 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13893 tree->opcode = OP_REGOFFSET;
13894 tree->inst_basereg = ins->inst_basereg;
13895 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13905 if (cfg->compute_gc_maps) {
13906 /* registers need liveness info even for non-ref variables */
13907 for (i = 0; i < cfg->num_varinfo; i++) {
13908 MonoInst *ins = cfg->varinfo [i];
13910 if (ins->opcode == OP_REGVAR)
13911 ins->flags |= MONO_INST_GC_TRACK;
13915 /* FIXME: widening and truncation */
13918 * As an optimization, when a variable allocated to the stack is first loaded into
13919 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13920 * the variable again.
13922 orig_next_vreg = cfg->next_vreg;
/* vreg_to_lvreg caches, per stack variable, the lvreg currently holding its
 * value; lvregs records which entries are live so they can be cleared cheaply. */
13923 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13924 lvregs_size = 1024;
13925 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13929 * These arrays contain the first and last instructions accessing a given
13931 * Since we emit bblocks in the same order we process them here, and we
13932 * don't split live ranges, these will precisely describe the live range of
13933 * the variable, i.e. the instruction range where a valid value can be found
13934 * in the variable's location.
13935 * The live range is computed using the liveness info computed by the liveness pass.
13936 * We can't use vmv->range, since that is an abstract live range, and we need
13937 * one which is instruction precise.
13938 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13940 /* FIXME: Only do this if debugging info is requested */
13941 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13942 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13943 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13944 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13946 /* Add spill loads/stores */
13947 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13950 if (cfg->verbose_level > 2)
13951 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13953 /* Clear vreg_to_lvreg array */
/* Cached lvregs are only valid within one bblock; invalidate them at each
 * bblock boundary by zeroing exactly the entries recorded in lvregs. */
13954 for (i = 0; i < lvregs_len; i++)
13955 vreg_to_lvreg [lvregs [i]] = 0;
13959 MONO_BB_FOR_EACH_INS (bb, ins) {
13960 const char *spec = INS_INFO (ins->opcode);
13961 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13962 gboolean store, no_lvreg;
13963 int sregs [MONO_MAX_SRC_REGS];
13965 if (G_UNLIKELY (cfg->verbose_level > 2))
13966 mono_print_ins (ins);
13968 if (ins->opcode == OP_NOP)
13972 * We handle LDADDR here as well, since it can only be decomposed
13973 * when variable addresses are known.
13975 if (ins->opcode == OP_LDADDR) {
13976 MonoInst *var = (MonoInst *)ins->inst_p0;
13978 if (var->opcode == OP_VTARG_ADDR) {
13979 /* Happens on SPARC/S390 where vtypes are passed by reference */
13980 MonoInst *vtaddr = var->inst_left;
13981 if (vtaddr->opcode == OP_REGVAR) {
13982 ins->opcode = OP_MOVE;
13983 ins->sreg1 = vtaddr->dreg;
13985 else if (var->inst_left->opcode == OP_REGOFFSET) {
13986 ins->opcode = OP_LOAD_MEMBASE;
13987 ins->inst_basereg = vtaddr->inst_basereg;
13988 ins->inst_offset = vtaddr->inst_offset;
13991 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13992 /* gsharedvt arg passed by ref */
13993 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13995 ins->opcode = OP_LOAD_MEMBASE;
13996 ins->inst_basereg = var->inst_basereg;
13997 ins->inst_offset = var->inst_offset;
13998 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13999 MonoInst *load, *load2, *load3;
14000 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14001 int reg1, reg2, reg3;
14002 MonoInst *info_var = cfg->gsharedvt_info_var;
14003 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14007 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14010 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14012 g_assert (info_var);
14013 g_assert (locals_var);
14015 /* Mark the instruction used to compute the locals var as used */
14016 cfg->gsharedvt_locals_var_ins = NULL;
14018 /* Load the offset */
14019 if (info_var->opcode == OP_REGOFFSET) {
14020 reg1 = alloc_ireg (cfg);
14021 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14022 } else if (info_var->opcode == OP_REGVAR) {
14024 reg1 = info_var->dreg;
14026 g_assert_not_reached ();
/* Load entries [idx] from the MonoGSharedVtMethodRuntimeInfo pointed to by reg1. */
14028 reg2 = alloc_ireg (cfg);
14029 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14030 /* Load the locals area address */
14031 reg3 = alloc_ireg (cfg);
14032 if (locals_var->opcode == OP_REGOFFSET) {
14033 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14034 } else if (locals_var->opcode == OP_REGVAR) {
14035 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14037 g_assert_not_reached ();
14039 /* Compute the address */
14040 ins->opcode = OP_PADD;
14044 mono_bblock_insert_before_ins (bb, ins, load3);
14045 mono_bblock_insert_before_ins (bb, load3, load2);
14047 mono_bblock_insert_before_ins (bb, load2, load);
14049 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is simply basereg + offset. */
14051 ins->opcode = OP_ADD_IMM;
14052 ins->sreg1 = var->inst_basereg;
14053 ins->inst_imm = var->inst_offset;
14056 *need_local_opts = TRUE;
14057 spec = INS_INFO (ins->opcode);
/* IL-level opcodes must all have been lowered by now; anything below
 * MONO_CEE_LAST at this point is a bug in an earlier pass. */
14060 if (ins->opcode < MONO_CEE_LAST) {
14061 mono_print_ins (ins);
14062 g_assert_not_reached ();
14066 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14070 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg and sreg2 so the base register is treated as a
 * source; spec2 is the matching rewritten spec string. Swapped back below. */
14071 tmp_reg = ins->dreg;
14072 ins->dreg = ins->sreg2;
14073 ins->sreg2 = tmp_reg;
14076 spec2 [MONO_INST_DEST] = ' ';
14077 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14078 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14079 spec2 [MONO_INST_SRC3] = ' ';
14081 } else if (MONO_IS_STORE_MEMINDEX (ins))
14082 g_assert_not_reached ();
14087 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14088 printf ("\t %.3s %d", spec, ins->dreg);
14089 num_sregs = mono_inst_get_src_registers (ins, sregs);
14090 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14091 printf (" %d", sregs [srcindex]);
/* --- Destination: if dreg names a global variable, either rename it to
 * the allocated hreg, fuse the store into the instruction, or emit an
 * explicit store to the variable's stack slot after the instruction. --- */
14098 regtype = spec [MONO_INST_DEST];
14099 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14102 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14103 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14104 MonoInst *store_ins;
14106 MonoInst *def_ins = ins;
14107 int dreg = ins->dreg; /* The original vreg */
14109 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14111 if (var->opcode == OP_REGVAR) {
14112 ins->dreg = var->dreg;
14113 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14115 * Instead of emitting a load+store, use a _membase opcode.
14117 g_assert (var->opcode == OP_REGOFFSET);
14118 if (ins->opcode == OP_MOVE) {
14122 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14123 ins->inst_basereg = var->inst_basereg;
14124 ins->inst_offset = var->inst_offset;
14127 spec = INS_INFO (ins->opcode);
14131 g_assert (var->opcode == OP_REGOFFSET);
14133 prev_dreg = ins->dreg;
14135 /* Invalidate any previous lvreg for this vreg */
14136 vreg_to_lvreg [ins->dreg] = 0;
14140 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float: the R8 value is actually held in integer registers. */
14142 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the instruction's result into a fresh lvreg; the store(s)
 * emitted below move it to the variable's stack slot. */
14145 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14147 #if SIZEOF_REGISTER != 8
14148 if (regtype == 'l') {
14149 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14150 mono_bblock_insert_after_ins (bb, ins, store_ins);
14151 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14152 mono_bblock_insert_after_ins (bb, ins, store_ins);
14153 def_ins = store_ins;
14158 g_assert (store_opcode != OP_STOREV_MEMBASE);
14160 /* Try to fuse the store into the instruction itself */
14161 /* FIXME: Add more instructions */
14162 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14163 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14164 ins->inst_imm = ins->inst_c0;
14165 ins->inst_destbasereg = var->inst_basereg;
14166 ins->inst_offset = var->inst_offset;
14167 spec = INS_INFO (ins->opcode);
14168 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* A plain move into the variable becomes a direct store of the source. */
14169 ins->opcode = store_opcode;
14170 ins->inst_destbasereg = var->inst_basereg;
14171 ins->inst_offset = var->inst_offset;
14175 tmp_reg = ins->dreg;
14176 ins->dreg = ins->sreg2;
14177 ins->sreg2 = tmp_reg;
14180 spec2 [MONO_INST_DEST] = ' ';
14181 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14182 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14183 spec2 [MONO_INST_SRC3] = ' ';
14185 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14186 // FIXME: The backends expect the base reg to be in inst_basereg
14187 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14189 ins->inst_basereg = var->inst_basereg;
14190 ins->inst_offset = var->inst_offset;
14191 spec = INS_INFO (ins->opcode);
14193 /* printf ("INS: "); mono_print_ins (ins); */
14194 /* Create a store instruction */
14195 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14197 /* Insert it after the instruction */
14198 mono_bblock_insert_after_ins (bb, ins, store_ins);
14200 def_ins = store_ins;
14203 * We can't assign ins->dreg to var->dreg here, since the
14204 * sregs could use it. So set a flag, and do it after
14207 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14208 dest_has_lvreg = TRUE;
14213 if (def_ins && !live_range_start [dreg]) {
14214 live_range_start [dreg] = def_ins;
14215 live_range_start_bb [dreg] = bb;
14218 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14221 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14222 tmp->inst_c1 = dreg;
14223 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* --- Sources: for each sreg naming a global variable, rename to its hreg,
 * reuse a cached lvreg, fuse the load into the instruction, or emit an
 * explicit load from the variable's stack slot before the instruction. --- */
14230 num_sregs = mono_inst_get_src_registers (ins, sregs);
14231 for (srcindex = 0; srcindex < 3; ++srcindex) {
14232 regtype = spec [MONO_INST_SRC1 + srcindex];
14233 sreg = sregs [srcindex];
14235 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14236 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14237 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14238 MonoInst *use_ins = ins;
14239 MonoInst *load_ins;
14240 guint32 load_opcode;
14242 if (var->opcode == OP_REGVAR) {
14243 sregs [srcindex] = var->dreg;
14244 //mono_inst_set_src_registers (ins, sregs);
14245 live_range_end [sreg] = use_ins;
14246 live_range_end_bb [sreg] = bb;
14248 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14251 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14252 /* var->dreg is a hreg */
14253 tmp->inst_c1 = sreg;
14254 mono_bblock_insert_after_ins (bb, ins, tmp);
14260 g_assert (var->opcode == OP_REGOFFSET);
14262 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14264 g_assert (load_opcode != OP_LOADV_MEMBASE);
14266 if (vreg_to_lvreg [sreg]) {
14267 g_assert (vreg_to_lvreg [sreg] != -1);
14269 /* The variable is already loaded to an lvreg */
14270 if (G_UNLIKELY (cfg->verbose_level > 2))
14271 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14272 sregs [srcindex] = vreg_to_lvreg [sreg];
14273 //mono_inst_set_src_registers (ins, sregs);
14277 /* Try to fuse the load into the instruction */
14278 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14279 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14280 sregs [0] = var->inst_basereg;
14281 //mono_inst_set_src_registers (ins, sregs);
14282 ins->inst_offset = var->inst_offset;
14283 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14284 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14285 sregs [1] = var->inst_basereg;
14286 //mono_inst_set_src_registers (ins, sregs);
14287 ins->inst_offset = var->inst_offset;
14289 if (MONO_IS_REAL_MOVE (ins)) {
/* The move becomes redundant once the load targets its dreg directly. */
14290 ins->opcode = OP_NOP;
14293 //printf ("%d ", srcindex); mono_print_ins (ins);
14295 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Remember the freshly loaded lvreg for later uses of the same variable,
 * unless the variable is volatile/indirect or fp-stack rules forbid it. */
14297 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14298 if (var->dreg == prev_dreg) {
14300 * sreg refers to the value loaded by the load
14301 * emitted below, but we need to use ins->dreg
14302 * since it refers to the store emitted earlier.
14306 g_assert (sreg != -1);
14307 vreg_to_lvreg [var->dreg] = sreg;
/* Grow the lvregs tracking array when full (doubling, mempool-backed). */
14308 if (lvregs_len >= lvregs_size) {
14309 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14310 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14311 lvregs = new_lvregs;
14314 lvregs [lvregs_len ++] = var->dreg;
14318 sregs [srcindex] = sreg;
14319 //mono_inst_set_src_registers (ins, sregs);
14321 #if SIZEOF_REGISTER != 8
14322 if (regtype == 'l') {
14323 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14324 mono_bblock_insert_before_ins (bb, ins, load_ins);
14325 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14326 mono_bblock_insert_before_ins (bb, ins, load_ins);
14327 use_ins = load_ins;
14332 #if SIZEOF_REGISTER == 4
14333 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14335 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14336 mono_bblock_insert_before_ins (bb, ins, load_ins);
14337 use_ins = load_ins;
14341 if (var->dreg < orig_next_vreg) {
14342 live_range_end [var->dreg] = use_ins;
14343 live_range_end_bb [var->dreg] = bb;
14346 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14349 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14350 tmp->inst_c1 = var->dreg;
14351 mono_bblock_insert_after_ins (bb, ins, tmp);
14355 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dest handling above: now that the sregs are processed,
 * it is safe to record the dest lvreg for reuse by later instructions. */
14357 if (dest_has_lvreg) {
14358 g_assert (ins->dreg != -1);
14359 vreg_to_lvreg [prev_dreg] = ins->dreg;
14360 if (lvregs_len >= lvregs_size) {
14361 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14362 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14363 lvregs = new_lvregs;
14366 lvregs [lvregs_len ++] = prev_dreg;
14367 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
14371 tmp_reg = ins->dreg;
14372 ins->dreg = ins->sreg2;
14373 ins->sreg2 = tmp_reg;
14376 if (MONO_IS_CALL (ins)) {
14377 /* Clear vreg_to_lvreg array */
14378 for (i = 0; i < lvregs_len; i++)
14379 vreg_to_lvreg [lvregs [i]] = 0;
14381 } else if (ins->opcode == OP_NOP) {
14383 MONO_INST_NULLIFY_SREGS (ins);
14386 if (cfg->verbose_level > 2)
14387 mono_print_ins_index (1, ins);
14390 /* Extend the live range based on the liveness info */
14391 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14392 for (i = 0; i < cfg->num_varinfo; i ++) {
14393 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14395 if (vreg_is_volatile (cfg, vi->vreg))
14396 /* The liveness info is incomplete */
14399 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14400 /* Live from at least the first ins of this bb */
14401 live_range_start [vi->vreg] = bb->code;
14402 live_range_start_bb [vi->vreg] = bb;
14405 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14406 /* Live at least until the last ins of this bb */
14407 live_range_end [vi->vreg] = bb->last_ins;
14408 live_range_end_bb [vi->vreg] = bb;
14415 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14416 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14418 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14419 for (i = 0; i < cfg->num_varinfo; ++i) {
14420 int vreg = MONO_VARINFO (cfg, i)->vreg;
14423 if (live_range_start [vreg]) {
14424 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14426 ins->inst_c1 = vreg;
14427 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14429 if (live_range_end [vreg]) {
14430 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14432 ins->inst_c1 = vreg;
14433 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14434 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14436 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14441 if (cfg->gsharedvt_locals_var_ins) {
14442 /* Nullify if unused */
14443 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14444 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14447 g_free (live_range_start);
14448 g_free (live_range_end);
14449 g_free (live_range_start_bb);
14450 g_free (live_range_end_bb);
14456 * - use 'iadd' instead of 'int_add'
14457 * - handling ovf opcodes: decompose in method_to_ir.
14458 * - unify iregs/fregs
14459 * -> partly done, the missing parts are:
14460 * - a more complete unification would involve unifying the hregs as well, so
14461 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14462 * would no longer map to the machine hregs, so the code generators would need to
14463 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14464 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14465 * fp/non-fp branches speeds it up by about 15%.
14466 * - use sext/zext opcodes instead of shifts
14468 * - get rid of TEMPLOADs if possible and use vregs instead
14469 * - clean up usage of OP_P/OP_ opcodes
14470 * - cleanup usage of DUMMY_USE
14471 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14473 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14474 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14475 * - make sure handle_stack_args () is called before the branch is emitted
14476 * - when the new IR is done, get rid of all unused stuff
14477 * - COMPARE/BEQ as separate instructions or unify them ?
14478 * - keeping them separate allows specialized compare instructions like
14479 * compare_imm, compare_membase
14480 * - most back ends unify fp compare+branch, fp compare+ceq
14481 * - integrate mono_save_args into inline_method
14482 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14483 * - handle long shift opts on 32 bit platforms somehow: they require
14484 * 3 sregs (2 for arg1 and 1 for arg2)
14485 * - make byref a 'normal' type.
14486 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14487 * variable if needed.
14488 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14489 * like inline_method.
14490 * - remove inlining restrictions
14491 * - fix LNEG and enable cfold of INEG
14492 * - generalize x86 optimizations like ldelema as a peephole optimization
14493 * - add store_mem_imm for amd64
14494 * - optimize the loading of the interruption flag in the managed->native wrappers
14495 * - avoid special handling of OP_NOP in passes
14496 * - move code inserting instructions into one function/macro.
14497 * - try a coalescing phase after liveness analysis
14498 * - add float -> vreg conversion + local optimizations on !x86
14499 * - figure out how to handle decomposed branches during optimizations, ie.
14500 * compare+branch, op_jump_table+op_br etc.
14501 * - promote RuntimeXHandles to vregs
14502 * - vtype cleanups:
14503 * - add a NEW_VARLOADA_VREG macro
14504 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14505 * accessing vtype fields.
14506 * - get rid of I8CONST on 64 bit platforms
14507 * - dealing with the increase in code size due to branches created during opcode
14509 * - use extended basic blocks
14510 * - all parts of the JIT
14511 * - handle_global_vregs () && local regalloc
14512 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14513 * - sources of increase in code size:
14516 * - isinst and castclass
14517 * - lvregs not allocated to global registers even if used multiple times
14518 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14520 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14521 * - add all micro optimizations from the old JIT
14522 * - put tree optimizations into the deadce pass
14523 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14524 * specific function.
14525 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14526 * fcompare + branchCC.
14527 * - create a helper function for allocating a stack slot, taking into account
14528 * MONO_CFG_HAS_SPILLUP.
14530 * - merge the ia64 switch changes.
14531 * - optimize mono_regstate2_alloc_int/float.
14532 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14533 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14534 * parts of the tree could be separated by other instructions, killing the tree
14535 * arguments, or stores killing loads etc. Also, should we fold loads into other
14536 * instructions if the result of the load is used multiple times ?
14537 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14538 * - LAST MERGE: 108395.
14539 * - when returning vtypes in registers, generate IR and append it to the end of the
14540 * last bb instead of doing it in the epilog.
14541 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14549 - When to decompose opcodes:
14550 - earlier: this makes some optimizations hard to implement, since the low level IR
14551 no longer contains the necessary information. But it is easier to do.
14552 - later: harder to implement, enables more optimizations.
14553 - Branches inside bblocks:
14554 - created when decomposing complex opcodes.
14555 - branches to another bblock: harmless, but not tracked by the branch
14556 optimizations, so need to branch to a label at the start of the bblock.
14557 - branches to inside the same bblock: very problematic, trips up the local
14558 reg allocator. Can be fixed by splitting the current bblock, but that is a
14559 complex operation, since some local vregs can become global vregs etc.
14560 - Local/global vregs:
14561 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14562 local register allocator.
14563 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14564 structure, created by mono_create_var (). Assigned to hregs or the stack by
14565 the global register allocator.
14566 - When to do optimizations like alu->alu_imm:
14567 - earlier -> saves work later on since the IR will be smaller/simpler
14568 - later -> can work on more instructions
14569 - Handling of valuetypes:
14570 - When a vtype is pushed on the stack, a new temporary is created, an
14571 instruction computing its address (LDADDR) is emitted and pushed on
14572 the stack. Need to optimize cases when the vtype is used immediately as in
14573 argument passing, stloc etc.
14574 - Instead of the to_end stuff in the old JIT, simply call the function handling
14575 the values on the stack before emitting the last instruction of the bb.
14578 #else /* !DISABLE_JIT */
14581 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14585 #endif /* !DISABLE_JIT */