3 * Convert CIL to the JIT internal representation
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2002 Ximian, Inc.
10 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/utils/mono-compiler.h>
31 #ifdef HAVE_SYS_TIME_H
39 #include <mono/utils/memcheck.h>
40 #include <mono/metadata/abi-details.h>
41 #include <mono/metadata/assembly.h>
42 #include <mono/metadata/attrdefs.h>
43 #include <mono/metadata/loader.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/class.h>
46 #include <mono/metadata/object.h>
47 #include <mono/metadata/exception.h>
48 #include <mono/metadata/opcodes.h>
49 #include <mono/metadata/mono-endian.h>
50 #include <mono/metadata/tokentype.h>
51 #include <mono/metadata/tabledefs.h>
52 #include <mono/metadata/marshal.h>
53 #include <mono/metadata/debug-helpers.h>
54 #include <mono/metadata/debug-internals.h>
55 #include <mono/metadata/gc-internals.h>
56 #include <mono/metadata/security-manager.h>
57 #include <mono/metadata/threads-types.h>
58 #include <mono/metadata/security-core-clr.h>
59 #include <mono/metadata/profiler-private.h>
60 #include <mono/metadata/profiler.h>
61 #include <mono/metadata/monitor.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/utils/mono-error-internals.h>
64 #include <mono/metadata/mono-basic-block.h>
65 #include <mono/metadata/reflection-internals.h>
66 #include <mono/utils/mono-threads-coop.h>
72 #include "jit-icalls.h"
74 #include "debugger-agent.h"
75 #include "seq-points.h"
76 #include "aot-compiler.h"
77 #include "mini-llvm.h"
/*
 * Tuning constants and error-exit macros for the IL-to-IR translator.
 * Most of these expand to a `goto exception_exit` (or `goto mono_error_exit`)
 * and assume a local MonoCompile `cfg` plus the corresponding label in the
 * enclosing function.
 * NOTE(review): the closing `} while (0)` lines of these macros appear to
 * have been elided from this copy of the file — confirm against upstream.
 */
79 #define BRANCH_COST 10
80 #define INLINE_LENGTH_LIMIT 20
82 /* These have 'cfg' as an implicit argument */
83 #define INLINE_FAILURE(msg) do { \
84 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
85 inline_failure (cfg, msg); \
86 goto exception_exit; \
89 #define CHECK_CFG_EXCEPTION do {\
90 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
91 goto exception_exit; \
93 #define FIELD_ACCESS_FAILURE(method, field) do { \
94 field_access_failure ((cfg), (method), (field)); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
99 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
100 goto exception_exit; \
103 #define GSHAREDVT_FAILURE(opcode) do { \
104 if (cfg->gsharedvt) { \
105 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
106 goto exception_exit; \
109 #define OUT_OF_MEMORY_FAILURE do { \
110 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
111 mono_error_set_out_of_memory (&cfg->error, ""); \
112 goto exception_exit; \
114 #define DISABLE_AOT(cfg) do { \
115 if ((cfg)->verbose_level >= 2) \
116 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
117 (cfg)->disable_aot = TRUE; \
119 #define LOAD_ERROR do { \
120 break_on_unverified (); \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
122 goto exception_exit; \
125 #define TYPE_LOAD_ERROR(klass) do { \
126 cfg->exception_ptr = klass; \
130 #define CHECK_CFG_ERROR do {\
131 if (!mono_error_ok (&cfg->error)) { \
132 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
133 goto mono_error_exit; \
137 /* Determine whenever 'ins' represents a load of the 'this' argument */
138 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in the file. */
140 static int ldind_to_load_membase (int opcode);
141 static int stind_to_store_membase (int opcode);
/* Non-static: also used from other translation units of the JIT. */
143 int mono_op_to_op_imm (int opcode);
144 int mono_op_to_op_imm_noemul (int opcode);
/* Forward declarations for the inliner and llvmonly virtual-call emitter. */
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
151 /* helper methods signatures */
/* Lazily built by mono_create_helper_signatures () below. */
152 static MonoMethodSignature *helper_sig_domain_get;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
155 static MonoMethodSignature *helper_sig_jit_thread_attach;
156 static MonoMethodSignature *helper_sig_get_tls_tramp;
157 static MonoMethodSignature *helper_sig_set_tls_tramp;
159 /* type loading helpers */
/* Generate cached lookup functions for the named managed classes. */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, "System.Runtime.CompilerServices", "RuntimeHelpers")
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, "System.Diagnostics", "DebuggableAttribute")
164 * Instruction metadata
/*
 * Per-opcode metadata tables, generated by expanding MINI_OP/MINI_OP3 over
 * mini-ops.h.  The file is included twice with different macro definitions:
 * first to emit dreg/sreg descriptor characters, then to emit sreg counts.
 */
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
184 /* keep in sync with the enum in mini.h */
187 #include "mini-ops.h"
/* Second expansion: number of source registers for each opcode. */
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
/*
 * Public wrappers around the vreg allocators: integer, long, float,
 * pointer-sized and stack-typed destination registers respectively.
 * NOTE(review): return types and braces appear elided in this copy.
 */
205 mono_alloc_ireg (MonoCompile *cfg)
207 return alloc_ireg (cfg);
211 mono_alloc_lreg (MonoCompile *cfg)
213 return alloc_lreg (cfg);
217 mono_alloc_freg (MonoCompile *cfg)
219 return alloc_freg (cfg);
223 mono_alloc_preg (MonoCompile *cfg)
225 return alloc_preg (cfg);
229 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
231 return alloc_dreg (cfg, stack_type);
235 * mono_alloc_ireg_ref:
237 * Allocate an IREG, and mark it as holding a GC ref.
240 mono_alloc_ireg_ref (MonoCompile *cfg)
242 return alloc_ireg_ref (cfg);
246 * mono_alloc_ireg_mp:
248 * Allocate an IREG, and mark it as holding a managed pointer.
251 mono_alloc_ireg_mp (MonoCompile *cfg)
253 return alloc_ireg_mp (cfg);
257 * mono_alloc_ireg_copy:
259 * Allocate an IREG with the same GC type as VREG.
262 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking kind (ref / managed ptr / plain int) of VREG. */
264 if (vreg_is_ref (cfg, vreg))
265 return alloc_ireg_ref (cfg);
266 else if (vreg_is_mp (cfg, vreg))
267 return alloc_ireg_mp (cfg);
269 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/OP_RMOVE/...) used to
 * copy a value of TYPE between vregs.  Recurses for enums, generic instances
 * and (when gshared) type variables.
 * NOTE(review): several case labels and return statements are elided in this
 * copy; the visible fall-through order may not reflect the full switch.
 */
273 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
278 type = mini_get_underlying_type (type);
280 switch (type->type) {
293 case MONO_TYPE_FNPTR:
295 case MONO_TYPE_CLASS:
296 case MONO_TYPE_STRING:
297 case MONO_TYPE_OBJECT:
298 case MONO_TYPE_SZARRAY:
299 case MONO_TYPE_ARRAY:
303 #if SIZEOF_REGISTER == 8
/* R4 values live in float regs unless the backend keeps them as r4. */
309 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
312 case MONO_TYPE_VALUETYPE:
313 if (type->data.klass->enumtype) {
314 type = mono_class_enum_basetype (type->data.klass);
317 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
320 case MONO_TYPE_TYPEDBYREF:
322 case MONO_TYPE_GENERICINST:
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
/* Inflate the generic instance to its container's byval type and retry. */
325 type = &type->data.generic_class->container_class->byval_arg;
329 g_assert (cfg->gshared);
330 if (mini_type_var_is_vt (type))
333 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
335 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print MSG, the basic block's number, its in/out edges
 * (block number and depth-first number), then every instruction in the block.
 */
341 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 GString *str = g_string_new ("");
347 g_string_append_printf (str, "%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 g_string_append_printf (str, " BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
350 g_string_append_printf (str, ", OUT: ");
351 for (i = 0; i < bb->out_count; ++i)
352 g_string_append_printf (str, " BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
353 g_string_append_printf (str, " ]\n");
355 g_print ("%s", str->str);
/* TRUE also frees the character data, not just the GString wrapper. */
356 g_string_free (str, TRUE);
358 for (tree = bb->code; tree; tree = tree->next)
359 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Build the icall signatures for the static helper_sig_* variables above.
 * The string format is "<ret> <arg>..." as parsed by
 * mono_create_icall_signature ().
 */
363 mono_create_helper_signatures (void)
365 helper_sig_domain_get = mono_create_icall_signature ("ptr");
366 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
367 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
368 helper_sig_jit_thread_attach = mono_create_icall_signature ("ptr ptr");
369 helper_sig_get_tls_tramp = mono_create_icall_signature ("ptr");
370 helper_sig_set_tls_tramp = mono_create_icall_signature ("void ptr");
/*
 * Out-of-line failure helpers.  They are MONO_NEVER_INLINE so the (cold)
 * failure paths do not bloat the hot IL translation code.
 */
373 static MONO_NEVER_INLINE void
374 break_on_unverified (void)
/* Hook point: trips when --break-on-unverified debug option is set. */
376 if (mini_get_debug_options ()->break_on_unverified)
/* Record a FieldAccessException for FIELD accessed from METHOD in cfg->error. */
380 static MONO_NEVER_INLINE void
381 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
383 char *method_fname = mono_method_full_name (method, TRUE);
384 char *field_fname = mono_field_full_name (field);
385 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
386 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
387 g_free (method_fname);
388 g_free (field_fname);
/* Mark the compilation as failed-to-inline; MSG is a human-readable reason. */
391 static MONO_NEVER_INLINE void
392 inline_failure (MonoCompile *cfg, const char *msg)
394 if (cfg->verbose_level >= 2)
395 printf ("inline failed: %s\n", msg);
396 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
399 static MONO_NEVER_INLINE void
400 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
402 if (cfg->verbose_level > 2) \
403 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
404 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Record that gsharedvt compilation failed on OPCODE at FILE:LINE; the
 * formatted message is stored in cfg->exception_message for diagnostics.
 */
407 static MONO_NEVER_INLINE void
408 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
410 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
411 if (cfg->verbose_level >= 2)
412 printf ("%s\n", cfg->exception_message);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
417 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
418 * foo<T> (int i) { ldarg.0; box T; }
/* Abort gsharedvt compilation (fall back to a concrete instantiation) on
 * unverifiable IL; otherwise trip the unverified-code debug hook. */
420 #define UNVERIFIED do { \
421 if (cfg->gsharedvt) { \
422 if (cfg->verbose_level > 2) \
423 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
424 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
425 goto exception_exit; \
427 break_on_unverified (); \
/* Look up (or lazily create and register) the bblock starting at IL offset IP. */ \
431 #define GET_BBLOCK(cfg,tblock,ip) do { \
432 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
434 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
435 NEW_BBLOCK (cfg, (tblock)); \
436 (tblock)->cil_code = (ip); \
437 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm. */ \
441 #if defined(TARGET_X86) || defined(TARGET_AMD64)
442 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
443 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
444 (dest)->dreg = alloc_ireg_mp ((cfg)); \
445 (dest)->sreg1 = (sr1); \
446 (dest)->sreg2 = (sr2); \
447 (dest)->inst_imm = (imm); \
448 (dest)->backend.shift_amount = (shift); \
449 MONO_ADD_INS ((cfg)->cbb, (dest)); \
453 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 *
 *   If the two operands of INS differ only in width/precision (r4 vs r8, or
 * on 64-bit targets i4 vs native int), emit the widening conversion and
 * rewrite the narrower operand reference in place.
 */
455 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
457 MonoInst *arg1 = *arg1_ref;
458 MonoInst *arg2 = *arg2_ref;
461 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
462 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
465 /* Mixing r4/r8 is allowed by the spec */
466 if (arg1->type == STACK_R4) {
467 int dreg = alloc_freg (cfg);
469 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
470 conv->type = STACK_R8;
474 if (arg2->type == STACK_R4) {
475 int dreg = alloc_freg (cfg);
477 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
478 conv->type = STACK_R8;
484 #if SIZEOF_REGISTER == 8
485 /* FIXME: Need to add many more cases */
486 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
/* Sign-extend the i4 operand to pointer width before the binop. */
489 int dr = alloc_preg (cfg);
490 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
491 (ins)->sreg2 = widen->dreg;
/* Pop two stack slots, emit a type-specialized binary op, push the result. */ \
496 #define ADD_BINOP(op) do { \
497 MONO_INST_NEW (cfg, ins, (op)); \
499 ins->sreg1 = sp [0]->dreg; \
500 ins->sreg2 = sp [1]->dreg; \
501 type_from_op (cfg, ins, sp [0], sp [1]); \
503 /* Have to insert a widening op */ \
504 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
505 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
506 MONO_ADD_INS ((cfg)->cbb, (ins)); \
507 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one stack slot, emit a type-specialized unary op, push the result. */ \
510 #define ADD_UNOP(op) do { \
511 MONO_INST_NEW (cfg, ins, (op)); \
513 ins->sreg1 = sp [0]->dreg; \
514 type_from_op (cfg, ins, sp [0], NULL); \
516 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
517 MONO_ADD_INS ((cfg)->cbb, (ins)); \
518 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair, wiring true/false bblock edges. */ \
521 #define ADD_BINCOND(next_block) do { \
524 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
525 cmp->sreg1 = sp [0]->dreg; \
526 cmp->sreg2 = sp [1]->dreg; \
527 type_from_op (cfg, cmp, sp [0], sp [1]); \
529 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
530 type_from_op (cfg, ins, sp [0], sp [1]); \
531 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
532 GET_BBLOCK (cfg, tblock, target); \
533 link_bblock (cfg, cfg->cbb, tblock); \
534 ins->inst_true_bb = tblock; \
535 if ((next_block)) { \
536 link_bblock (cfg, cfg->cbb, (next_block)); \
537 ins->inst_false_bb = (next_block); \
538 start_new_bblock = 1; \
540 GET_BBLOCK (cfg, tblock, ip); \
541 link_bblock (cfg, cfg->cbb, tblock); \
542 ins->inst_false_bb = tblock; \
543 start_new_bblock = 2; \
545 if (sp != stack_start) { \
546 handle_stack_args (cfg, stack_start, sp - stack_start); \
547 CHECK_UNVERIFIABLE (cfg); \
549 MONO_ADD_INS (cfg->cbb, cmp); \
550 MONO_ADD_INS (cfg->cbb, ins); \
554 * link_bblock: Links two basic blocks
556 * links two basic blocks in the control flow graph, the 'from'
557 * argument is the starting block and the 'to' argument is the block
558 * the control flow ends to after 'from'.
561 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
563 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit are special blocks). */
567 if (from->cil_code) {
569 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
571 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
574 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
576 printf ("edge from entry to exit\n");
/* No-op if the out-edge already exists. */
581 for (i = 0; i < from->out_count; ++i) {
582 if (to == from->out_bb [i]) {
/* Grow the out-edge array by one (mempool arrays are copy-on-grow). */
588 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
589 for (i = 0; i < from->out_count; ++i) {
590 newa [i] = from->out_bb [i];
/* Mirror the same dedup-then-grow logic for the in-edges of 'to'. */
598 for (i = 0; i < to->in_count; ++i) {
599 if (from == to->in_bb [i]) {
605 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
606 for (i = 0; i < to->in_count; ++i) {
607 newa [i] = to->in_bb [i];
/* Public wrapper around link_bblock (). */
616 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
618 link_bblock (cfg, from, to);
622 * mono_find_block_region:
624 * We mark each basic block with a region ID. We use that to avoid BB
625 * optimizations when blocks are in different regions.
628 * A region token that encodes where this region is, and information
629 * about the clause owner for this block.
631 * The region encodes the try/catch/filter clause that owns this block
632 * as well as the type. -1 is a special value that represents a block
633 * that is in none of try/catch/filter.
636 mono_find_block_region (MonoCompile *cfg, int offset)
638 MonoMethodHeader *header = cfg->header;
639 MonoExceptionClause *clause;
/* First pass: handler regions (filter/finally/fault/catch) win over try. */
642 for (i = 0; i < header->num_clauses; ++i) {
643 clause = &header->clauses [i];
644 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
645 (offset < (clause->handler_offset)))
/* Region token layout: (clause index + 1) << 8 | region kind | clause flags. */
646 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
648 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
649 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
650 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
651 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
652 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
654 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: protected (try) regions. */
657 for (i = 0; i < header->num_clauses; ++i) {
658 clause = &header->clauses [i];
660 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
661 return ((i + 1) << 8) | clause->flags;
/*
 * ip_in_finally_clause:
 *
 *   Return whether IL OFFSET lies inside the handler of a finally or
 * fault clause of the current method.
 */
668 ip_in_finally_clause (MonoCompile *cfg, int offset)
670 MonoMethodHeader *header = cfg->header;
671 MonoExceptionClause *clause;
674 for (i = 0; i < header->num_clauses; ++i) {
675 clause = &header->clauses [i];
/* Skip clauses that are not finally/fault handlers. */
676 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
679 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/*
 * mono_find_final_block:
 *
 *   Collect the clauses of kind TYPE whose protected region contains IP but
 * not TARGET, i.e. the handlers that must run when branching from IP to
 * TARGET (e.g. finally blocks crossed by a leave).  Returns a GList.
 */
686 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
688 MonoMethodHeader *header = cfg->header;
689 MonoExceptionClause *clause;
693 for (i = 0; i < header->num_clauses; ++i) {
694 clause = &header->clauses [i];
695 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
696 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
697 if (clause->flags == type)
698 res = g_list_append (res, clause);
/*
 * Cached per-region stack-pointer vars and per-offset exception vars.
 * Both kinds are looked up in a hash table and created on demand; they are
 * marked MONO_INST_VOLATILE so the register allocator leaves them in memory.
 */
705 mono_create_spvar_for_region (MonoCompile *cfg, int region)
709 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
713 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
714 /* prevent it from being register allocated */
715 var->flags |= MONO_INST_VOLATILE;
717 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Lookup-only variant: returns NULL if no exvar exists for OFFSET. */
721 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
723 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Find-or-create the object-typed exception var for IL OFFSET. */
727 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
731 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
735 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
736 /* prevent it from being register allocated */
737 var->flags |= MONO_INST_VOLATILE;
739 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
745 * Returns the type used in the eval stack when @type is loaded.
746 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets both inst->type (STACK_*) and inst->klass for the loaded value.
 * NOTE(review): several case labels are elided in this copy of the file. */
749 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
753 type = mini_get_underlying_type (type);
754 inst->klass = klass = mono_class_from_mono_type (type);
756 inst->type = STACK_MP;
761 switch (type->type) {
763 inst->type = STACK_INV;
771 inst->type = STACK_I4;
776 case MONO_TYPE_FNPTR:
777 inst->type = STACK_PTR;
779 case MONO_TYPE_CLASS:
780 case MONO_TYPE_STRING:
781 case MONO_TYPE_OBJECT:
782 case MONO_TYPE_SZARRAY:
783 case MONO_TYPE_ARRAY:
784 inst->type = STACK_OBJ;
788 inst->type = STACK_I8;
/* r4 may map to STACK_R4 or STACK_R8 depending on backend support. */
791 inst->type = cfg->r4_stack_type;
794 inst->type = STACK_R8;
796 case MONO_TYPE_VALUETYPE:
797 if (type->data.klass->enumtype) {
798 type = mono_class_enum_basetype (type->data.klass);
802 inst->type = STACK_VTYPE;
805 case MONO_TYPE_TYPEDBYREF:
806 inst->klass = mono_defaults.typed_reference_class;
807 inst->type = STACK_VTYPE;
809 case MONO_TYPE_GENERICINST:
810 type = &type->data.generic_class->container_class->byval_arg;
814 g_assert (cfg->gshared);
815 if (mini_is_gsharedvt_type (type)) {
816 g_assert (cfg->gsharedvt);
817 inst->type = STACK_VTYPE;
819 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
823 g_error ("unknown type 0x%02x in eval stack type", type->type);
828 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Rows/columns are indexed by STACK_* values; entry is the result stack type
 * of the binary op, or STACK_INV for an illegal operand combination.
 * NOTE(review): some rows carry a 9th entry (the r4 column) while others
 * rely on implicit zero (= STACK_INV) initialization. */
831 bin_num_table [STACK_MAX] [STACK_MAX] = {
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
837 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of unary negation, indexed by operand STACK_* type. */
845 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
848 /* reduce the size of this table */
/* Integer-only binops (and/or/xor/...): no float or reference rows. */
850 bin_int_table [STACK_MAX] [STACK_MAX] = {
851 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
855 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality: 0 = invalid, nonzero = allowed (values > 1 encode
 * combinations that are allowed but unverifiable per the spec). */
862 bin_comp_table [STACK_MAX] [STACK_MAX] = {
863 /* Inv i L p F & O vt r4 */
865 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
866 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
867 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
868 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
869 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
870 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
871 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
872 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
875 /* reduce the size of this table */
/* Shift ops: result has the type of the value being shifted. */
877 shift_table [STACK_MAX] [STACK_MAX] = {
878 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
882 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
883 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
884 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
885 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
889 * Tables to map from the non-specific opcode to the matching
890 * type-specific opcode.
/* Each entry is an opcode delta added to the generic CEE_/OP_ opcode to get
 * the type-specialized one, indexed by the operand's STACK_* type. */
892 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
894 binops_op_map [STACK_MAX] = {
895 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
898 /* handles from CEE_NEG to CEE_CONV_U8 */
900 unops_op_map [STACK_MAX] = {
901 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
904 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
906 ovfops_op_map [STACK_MAX] = {
907 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
910 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
912 ovf2ops_op_map [STACK_MAX] = {
913 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
916 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
918 ovf3ops_op_map [STACK_MAX] = {
919 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
922 /* handles from CEE_BEQ to CEE_BLT_UN */
924 beqops_op_map [STACK_MAX] = {
925 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
928 /* handles from CEE_CEQ to CEE_CLT_UN */
930 ceqops_op_map [STACK_MAX] = {
931 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
935 * Sets ins->type (the type on the eval stack) according to the
936 * type of the opcode and the arguments to it.
937 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
939 * FIXME: this function sets ins->type unconditionally in some cases, but
940 * it should set it to invalid for some types (a conv.x on an object)
/* Also rewrites the generic opcode into its type-specialized form via the
 * *_op_map delta tables above.  SRC2 may be NULL for unary opcodes.
 * NOTE(review): many case labels and break statements are elided in this
 * copy; groupings below follow the visible lines only. */
943 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
945 switch (ins->opcode) {
952 /* FIXME: check unverifiable args for STACK_MP */
953 ins->type = bin_num_table [src1->type] [src2->type];
954 ins->opcode += binops_op_map [ins->type];
961 ins->type = bin_int_table [src1->type] [src2->type];
962 ins->opcode += binops_op_map [ins->type];
967 ins->type = shift_table [src1->type] [src2->type];
968 ins->opcode += binops_op_map [ins->type];
/* Compare: pick L/R/F/I variant by operand width and kind. */
973 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
974 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
975 ins->opcode = OP_LCOMPARE;
976 else if (src1->type == STACK_R4)
977 ins->opcode = OP_RCOMPARE;
978 else if (src1->type == STACK_R8)
979 ins->opcode = OP_FCOMPARE;
981 ins->opcode = OP_ICOMPARE;
983 case OP_ICOMPARE_IMM:
/* Immediate compare has a single source, hence src1 indexed twice. */
984 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
985 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
986 ins->opcode = OP_LCOMPARE_IMM;
998 ins->opcode += beqops_op_map [src1->type];
1001 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
1002 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares: only table entries == 1 are verifiable here. */
1008 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1009 ins->opcode += ceqops_op_map [src1->type];
1013 ins->type = neg_table [src1->type];
1014 ins->opcode += unops_op_map [ins->type];
1017 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1018 ins->type = src1->type;
1020 ins->type = STACK_INV;
1021 ins->opcode += unops_op_map [ins->type];
1027 ins->type = STACK_I4;
1028 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> floating point. */
1031 ins->type = STACK_R8;
1032 switch (src1->type) {
1035 ins->opcode = OP_ICONV_TO_R_UN;
1038 ins->opcode = OP_LCONV_TO_R_UN;
1042 case CEE_CONV_OVF_I1:
1043 case CEE_CONV_OVF_U1:
1044 case CEE_CONV_OVF_I2:
1045 case CEE_CONV_OVF_U2:
1046 case CEE_CONV_OVF_I4:
1047 case CEE_CONV_OVF_U4:
1048 ins->type = STACK_I4;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_I_UN:
1052 case CEE_CONV_OVF_U_UN:
1053 ins->type = STACK_PTR;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1056 case CEE_CONV_OVF_I1_UN:
1057 case CEE_CONV_OVF_I2_UN:
1058 case CEE_CONV_OVF_I4_UN:
1059 case CEE_CONV_OVF_U1_UN:
1060 case CEE_CONV_OVF_U2_UN:
1061 case CEE_CONV_OVF_U4_UN:
1062 ins->type = STACK_I4;
1063 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; widening depends on pointer size. */
1066 ins->type = STACK_PTR;
1067 switch (src1->type) {
1069 ins->opcode = OP_ICONV_TO_U;
1073 #if SIZEOF_VOID_P == 8
1074 ins->opcode = OP_LCONV_TO_U;
1076 ins->opcode = OP_MOVE;
1080 ins->opcode = OP_LCONV_TO_U;
1083 ins->opcode = OP_FCONV_TO_U;
1089 ins->type = STACK_I8;
1090 ins->opcode += unops_op_map [src1->type];
1092 case CEE_CONV_OVF_I8:
1093 case CEE_CONV_OVF_U8:
1094 ins->type = STACK_I8;
1095 ins->opcode += ovf3ops_op_map [src1->type];
1097 case CEE_CONV_OVF_U8_UN:
1098 case CEE_CONV_OVF_I8_UN:
1099 ins->type = STACK_I8;
1100 ins->opcode += ovf2ops_op_map [src1->type];
1103 ins->type = cfg->r4_stack_type;
1104 ins->opcode += unops_op_map [src1->type];
1107 ins->type = STACK_R8;
1108 ins->opcode += unops_op_map [src1->type];
1111 ins->type = STACK_R8;
1115 ins->type = STACK_I4;
1116 ins->opcode += ovfops_op_map [src1->type];
1119 case CEE_CONV_OVF_I:
1120 case CEE_CONV_OVF_U:
1121 ins->type = STACK_PTR;
1122 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floats. */
1125 case CEE_ADD_OVF_UN:
1127 case CEE_MUL_OVF_UN:
1129 case CEE_SUB_OVF_UN:
1130 ins->type = bin_num_table [src1->type] [src2->type];
1131 ins->opcode += ovfops_op_map [src1->type];
1132 if (ins->type == STACK_R8)
1133 ins->type = STACK_INV;
1135 case OP_LOAD_MEMBASE:
1136 ins->type = STACK_PTR;
1138 case OP_LOADI1_MEMBASE:
1139 case OP_LOADU1_MEMBASE:
1140 case OP_LOADI2_MEMBASE:
1141 case OP_LOADU2_MEMBASE:
1142 case OP_LOADI4_MEMBASE:
1143 case OP_LOADU4_MEMBASE:
1144 ins->type = STACK_PTR;
1146 case OP_LOADI8_MEMBASE:
1147 ins->type = STACK_I8;
1149 case OP_LOADR4_MEMBASE:
1150 ins->type = cfg->r4_stack_type;
1152 case OP_LOADR8_MEMBASE:
1153 ins->type = STACK_R8;
1156 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1160 if (ins->type == STACK_MP)
1161 ins->klass = mono_defaults.object_class;
/*
 * ldind_to_type:
 *
 *   Map a CEE_LDIND_* opcode to the MonoClass of the value it loads.
 */
1165 ldind_to_type (int op)
1168 case CEE_LDIND_I1: return mono_defaults.sbyte_class;
1169 case CEE_LDIND_U1: return mono_defaults.byte_class;
1170 case CEE_LDIND_I2: return mono_defaults.int16_class;
1171 case CEE_LDIND_U2: return mono_defaults.uint16_class;
1172 case CEE_LDIND_I4: return mono_defaults.int32_class;
1173 case CEE_LDIND_U4: return mono_defaults.uint32_class;
1174 case CEE_LDIND_I8: return mono_defaults.int64_class;
1175 case CEE_LDIND_I: return mono_defaults.int_class;
1176 case CEE_LDIND_R4: return mono_defaults.single_class;
1177 case CEE_LDIND_R8: return mono_defaults.double_class;
1178 case CEE_LDIND_REF:return mono_defaults.object_class; //FIXME we should try to return a more specific type
1179 default: g_error ("Unknown ldind type %d", op);
1186 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Validate that the stack-typed values in ARGS are compatible with the
 * parameter types of SIG; used to sanity-check call arguments.
 * NOTE(review): the return statements and most of the switch bodies are
 * elided in this copy of the file.
 */
1191 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1196 switch (args->type) {
1206 for (i = 0; i < sig->param_count; ++i) {
1207 switch (args [i].type) {
1211 if (!sig->params [i]->byref)
1215 if (sig->params [i]->byref)
1217 switch (sig->params [i]->type) {
1218 case MONO_TYPE_CLASS:
1219 case MONO_TYPE_STRING:
1220 case MONO_TYPE_OBJECT:
1221 case MONO_TYPE_SZARRAY:
1222 case MONO_TYPE_ARRAY:
/* Floating point stack slots only match R4/R8 by-value parameters. */
1229 if (sig->params [i]->byref)
1231 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1240 /*if (!param_table [args [i].type] [sig->params [i]->type])
1248 * When we need a pointer to the current domain many times in a method, we
1249 * call mono_domain_get() once and we store the result in a local variable.
1250 * This function returns the variable that represents the MonoDomain*.
1252 inline static MonoInst *
1253 mono_get_domainvar (MonoCompile *cfg)
1255 if (!cfg->domainvar)
1256 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1257 return cfg->domainvar;
1261 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 *   Return the local variable holding the GOT address for AOT
 *   compilation, creating it on first use. Returns early (NULL on the
 *   elided path, presumably) when not compiling AOT or when the backend
 *   does not need a got_var.
 */
1265 mono_get_got_var (MonoCompile *cfg)
1267 	if (!cfg->compile_aot || !cfg->backend->need_got_var)
1269 	if (!cfg->got_var) {
1270 		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1272 	return cfg->got_var;
/*
 * mono_create_rgctx_var:
 *   Lazily create cfg->rgctx_var, the local holding the runtime generic
 *   context. Marked MONO_INST_VOLATILE so it is stack allocated rather
 *   than assigned to a register.
 */
1276 mono_create_rgctx_var (MonoCompile *cfg)
1278 	if (!cfg->rgctx_var) {
1279 		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1280 		/* force the var to be stack allocated */
1281 		cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
/*
 * mono_get_vtable_var:
 *   Return the rgctx variable (which doubles as the vtable variable for
 *   gshared methods), creating it if needed. Only valid when
 *   cfg->gshared is set.
 */
1286 mono_get_vtable_var (MonoCompile *cfg)
1288 	g_assert (cfg->gshared);
1290 	mono_create_rgctx_var (cfg);
1292 	return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *   Map an instruction's evaluation-stack type (STACK_*) back to a
 *   MonoType*. For the elided case before STACK_OBJ (presumably
 *   STACK_MP) the managed-pointer form ins->klass->this_arg is used;
 *   STACK_VTYPE uses the instruction's klass. Unhandled stack types
 *   abort via g_error ().
 */
1296 type_from_stack_type (MonoInst *ins) {
1297 	switch (ins->type) {
1298 	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1299 	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1300 	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1301 	case STACK_R4: return &mono_defaults.single_class->byval_arg;
1302 	case STACK_R8: return &mono_defaults.double_class->byval_arg;
1304 		return &ins->klass->this_arg;
1305 	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1306 	case STACK_VTYPE: return &ins->klass->byval_arg;
1308 		g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Inverse of type_from_stack_type: map a MonoType to its STACK_*
 *   evaluation-stack type. The underlying type is resolved first so
 *   enums behave like their base type. R4 maps to cfg->r4_stack_type
 *   (backend-dependent). Generic insts that are valuetypes fall through
 *   to the valuetype handling; anything unhandled asserts.
 */
1313 static G_GNUC_UNUSED int
1314 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1316 	t = mono_type_get_underlying_type (t);
1328 	case MONO_TYPE_FNPTR:
1330 	case MONO_TYPE_CLASS:
1331 	case MONO_TYPE_STRING:
1332 	case MONO_TYPE_OBJECT:
1333 	case MONO_TYPE_SZARRAY:
1334 	case MONO_TYPE_ARRAY:
1340 		return cfg->r4_stack_type;
1343 	case MONO_TYPE_VALUETYPE:
1344 	case MONO_TYPE_TYPEDBYREF:
1346 	case MONO_TYPE_GENERICINST:
1347 		if (mono_type_generic_inst_is_valuetype (t))
1353 	g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Map a CEE_LDELEM_* / CEE_STELEM_* opcode to the MonoClass of the
 *   array element it accesses (the opcodes for each return are elided
 *   here but follow the standard ldelem/stelem pairs). Asserts on an
 *   opcode outside the array-access family.
 */
1360 array_access_to_klass (int opcode)
1364 		return mono_defaults.byte_class;
1366 		return mono_defaults.uint16_class;
1369 		return mono_defaults.int_class;
1372 		return mono_defaults.sbyte_class;
1375 		return mono_defaults.int16_class;
1378 		return mono_defaults.int32_class;
1380 		return mono_defaults.uint32_class;
1383 		return mono_defaults.int64_class;
1386 		return mono_defaults.single_class;
1389 		return mono_defaults.double_class;
1390 	case CEE_LDELEM_REF:
1391 	case CEE_STELEM_REF:
1392 		return mono_defaults.object_class;
1394 		g_assert_not_reached ();
1400 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Get (or create) the local variable used to carry the stack item
 *   'ins' across basic-block boundaries at stack depth 'slot'.
 *   Integer-like stack types are shared through the cfg->intvars cache
 *   (keyed by stack type + slot); other types always get a fresh var.
 *   Slots beyond header->max_stack (possible with inlining) also get a
 *   fresh var, bypassing the cache.
 */
1403 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1408 	/* inlining can result in deeper stacks */
1409 	if (slot >= cfg->header->max_stack)
1410 		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1412 	pos = ins->type - 1 + slot * STACK_MAX;
1414 	switch (ins->type) {
1421 		if ((vnum = cfg->intvars [pos]))
1422 			return cfg->varinfo [vnum];
1423 		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1424 		cfg->intvars [pos] = res->inst_c0;
1427 		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   When AOT compiling, record the (image, token) pair that produced
 *   'key' in cfg->token_info_hash, so the AOT compiler can later look
 *   the item up by metadata token. Skipped when a generic_context is
 *   set (token alone would be ambiguous) or for wrapper references
 *   (table == 0).
 */
1433 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1436 	 * Don't use this if a generic_context is set, since that means AOT can't
1437 	 * look up the method using just the image+token.
1438 	 * table == 0 means this is a reference made from a wrapper.
1440 	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1441 		MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1442 		jump_info_token->image = image;
1443 		jump_info_token->token = token;
1444 		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1449 * This function is called to handle items that are left on the evaluation stack
1450 * at basic block boundaries. What happens is that we save the values to local variables
1451 * and we reload them later when first entering the target basic block (with the
1452 * handle_loaded_temps () function).
1453 * A single joint point will use the same variables (stored in the array bb->out_stack or
1454 * bb->in_stack, if the basic block is before or after the joint point).
1456 * This function needs to be called _before_ emitting the last instruction of
1457 * the bb (i.e. before emitting a branch).
1458 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *   Spill the 'count' items in 'sp' (the evaluation stack at the end of
 *   the current bblock) into locals shared with the successor bblocks,
 *   so values survive the basic-block boundary. See the comment above
 *   this function for the full contract. Three phases are visible:
 *   (1) pick/allocate the out_stack variable array (reusing a
 *   successor's in_stack when possible), (2) propagate it to successors
 *   as their in_stack, flagging cfg->unverifiable on a depth mismatch,
 *   (3) emit the temp stores, repeating for any successor whose
 *   in_stack differs from the one already stored to.
 */
1461 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1464 	MonoBasicBlock *bb = cfg->cbb;
1465 	MonoBasicBlock *outb;
1466 	MonoInst *inst, **locals;
1471 	if (cfg->verbose_level > 3)
1472 		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1473 	if (!bb->out_scount) {
1474 		bb->out_scount = count;
1475 		//printf ("bblock %d has out:", bb->block_num);
1477 		for (i = 0; i < bb->out_count; ++i) {
1478 			outb = bb->out_bb [i];
1479 			/* exception handlers are linked, but they should not be considered for stack args */
1480 			if (outb->flags & BB_EXCEPTION_HANDLER)
1482 			//printf (" %d", outb->block_num);
1483 			if (outb->in_stack) {
1485 				bb->out_stack = outb->in_stack;
/* No successor had an in_stack to share: allocate fresh out_stack vars. */
1491 			bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1492 			for (i = 0; i < count; ++i) {
1505 				if (cfg->inlined_method)
1506 					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1508 					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: make every (non-handler) successor inherit this out_stack. */
1513 	for (i = 0; i < bb->out_count; ++i) {
1514 		outb = bb->out_bb [i];
1515 		/* exception handlers are linked, but they should not be considered for stack args */
1516 		if (outb->flags & BB_EXCEPTION_HANDLER)
1518 		if (outb->in_scount) {
1519 			if (outb->in_scount != bb->out_scount) {
1520 				cfg->unverifiable = TRUE;
1523 			continue; /* check they are the same locals */
1525 		outb->in_scount = count;
1526 		outb->in_stack = bb->out_stack;
/* Phase 3: store each stack item into its variable and replace it on sp. */
1529 	locals = bb->out_stack;
1531 	for (i = 0; i < count; ++i) {
1532 		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1533 		inst->cil_code = sp [i]->cil_code;
1534 		sp [i] = locals [i];
1535 		if (cfg->verbose_level > 3)
1536 			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1540 	 * It is possible that the out bblocks already have in_stack assigned, and
1541 	 * the in_stacks differ. In this case, we will store to all the different
1548 	/* Find a bblock which has a different in_stack */
1550 	while (bindex < bb->out_count) {
1551 		outb = bb->out_bb [bindex];
1552 		/* exception handlers are linked, but they should not be considered for stack args */
1553 		if (outb->flags & BB_EXCEPTION_HANDLER) {
1557 		if (outb->in_stack != locals) {
1558 			for (i = 0; i < count; ++i) {
1559 				EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1560 				inst->cil_code = sp [i]->cil_code;
1561 				sp [i] = locals [i];
1562 				if (cfg->verbose_level > 3)
1563 					printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1565 			locals = outb->in_stack;
/*
 * mini_emit_runtime_constant:
 *   Emit IR loading the runtime value described by (patch_type, data).
 *   Under AOT an AOTCONST patch is emitted; otherwise the patch is
 *   resolved eagerly via mono_resolve_patch_target () and emitted as a
 *   plain pointer constant. Returns the resulting MonoInst (elided).
 */
1575 mini_emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1579 	if (cfg->compile_aot) {
1580 		EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1586 		ji.type = patch_type;
1587 		ji.data.target = data;
1588 		target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1589 		mono_error_assert_ok (&error);
1591 		EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mono_create_fast_tls_getter:
 *   Create (but do not add to a bblock) an OP_TLS_GET instruction
 *   reading the TLS slot for 'key', when the platform supports fast TLS
 *   and the offset is known. Returns NULL-equivalent fallback paths are
 *   elided; not used under AOT (the offset would not be stable).
 */
1597 mono_create_fast_tls_getter (MonoCompile *cfg, MonoTlsKey key)
1599 	int tls_offset = mono_tls_get_tls_offset (key);
1601 	if (cfg->compile_aot)
1604 	if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1606 		MONO_INST_NEW (cfg, ins, OP_TLS_GET);
1607 		ins->dreg = mono_alloc_preg (cfg);
1608 		ins->inst_offset = tls_offset;
/*
 * mono_create_fast_tls_setter:
 *   Counterpart of mono_create_fast_tls_getter: create an OP_TLS_SET
 *   instruction storing value->dreg into the TLS slot for 'key' when
 *   fast TLS is available. Not used under AOT.
 */
1615 mono_create_fast_tls_setter (MonoCompile *cfg, MonoInst* value, MonoTlsKey key)
1617 	int tls_offset = mono_tls_get_tls_offset (key);
1619 	if (cfg->compile_aot)
1622 	if (tls_offset != -1 && mono_arch_have_fast_tls ()) {
1624 		MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1625 		ins->sreg1 = value->dreg;
1626 		ins->inst_offset = tls_offset;
/*
 * mono_create_tls_get:
 *   Emit IR to read the TLS slot 'key'. Prefers the fast inline getter
 *   (unless the use_fallback_tls debug option is set); otherwise falls
 *   back to a call: under AOT a GET_TLS_TRAMP calli (avoiding the plt
 *   path, see comment below), at JIT time a jit icall to the slot's
 *   getter function.
 */
1634 mono_create_tls_get (MonoCompile *cfg, MonoTlsKey key)
1636 	MonoInst *fast_tls = NULL;
1638 	if (!mini_get_debug_options ()->use_fallback_tls)
1639 		fast_tls = mono_create_fast_tls_getter (cfg, key);
1642 		MONO_ADD_INS (cfg->cbb, fast_tls);
1646 	if (cfg->compile_aot) {
1649 		 * tls getters are critical pieces of code and we don't want to resolve them
1650 		 * through the standard plt/tramp mechanism since we might expose ourselves
1651 		 * to crashes and infinite recursions.
1653 		EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GET_TLS_TRAMP, (void*)key);
1654 		return mini_emit_calli (cfg, helper_sig_get_tls_tramp, NULL, addr, NULL, NULL);
1656 		gpointer getter = mono_tls_get_tls_getter (key, FALSE);
1657 		return mono_emit_jit_icall (cfg, getter, NULL);
/*
 * mono_create_tls_set:
 *   Emit IR to store 'value' into the TLS slot 'key'. Mirrors
 *   mono_create_tls_get: fast inline setter when available, otherwise a
 *   SET_TLS_TRAMP calli under AOT or a jit icall to the slot's setter.
 */
1662 mono_create_tls_set (MonoCompile *cfg, MonoInst *value, MonoTlsKey key)
1664 	MonoInst *fast_tls = NULL;
1666 	if (!mini_get_debug_options ()->use_fallback_tls)
1667 		fast_tls = mono_create_fast_tls_setter (cfg, value, key);
1670 		MONO_ADD_INS (cfg->cbb, fast_tls);
1674 	if (cfg->compile_aot) {
1676 		EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_SET_TLS_TRAMP, (void*)key);
1677 		return mini_emit_calli (cfg, helper_sig_set_tls_tramp, &value, addr, NULL, NULL);
1679 		gpointer setter = mono_tls_get_tls_setter (key, FALSE);
1680 		return mono_emit_jit_icall (cfg, setter, &value);
1687 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 *   Emit IR to link cfg->lmf_var onto the thread's LMF (Last Managed
 *   Frame) stack, per the pseudocode in the comment below. The lmf_addr
 *   is cached in cfg->lmf_addr_var so it can be promoted to a global
 *   register; it comes either from the JIT TLS structure or from the
 *   dedicated LMF_ADDR TLS key (the selecting condition is elided).
 */
1690 emit_push_lmf (MonoCompile *cfg)
1693 	 * Emit IR to push the LMF:
1694 	 * lmf_addr = <lmf_addr from tls>
1695 	 * lmf->lmf_addr = lmf_addr
1696 	 * lmf->prev_lmf = *lmf_addr
1699 	MonoInst *ins, *lmf_ins;
1704 	int lmf_reg, prev_lmf_reg;
1706 	 * Store lmf_addr in a variable, so it can be allocated to a global register.
1708 	if (!cfg->lmf_addr_var)
1709 		cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1712 		ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
1714 		int jit_tls_dreg = ins->dreg;
1716 		lmf_reg = alloc_preg (cfg);
1717 		EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1719 		lmf_ins = mono_create_tls_get (cfg, TLS_KEY_LMF_ADDR);
1722 	lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1724 	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1725 	lmf_reg = ins->dreg;
1727 	prev_lmf_reg = alloc_preg (cfg);
1728 	/* Save previous_lmf */
1729 	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1730 	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf: *lmf_addr = lmf */
1732 	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1738 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 *   Emit IR to unlink cfg->lmf_var from the LMF stack:
 *   *(lmf->lmf_addr) = lmf->prev_lmf. Creates cfg->lmf_addr_var on
 *   demand, since this can run before emit_push_lmf ().
 */
1741 emit_pop_lmf (MonoCompile *cfg)
1743 	int lmf_reg, lmf_addr_reg;
1749 	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1750 	lmf_reg = ins->dreg;
1754 	 * Emit IR to pop the LMF:
1755 	 * *(lmf->lmf_addr) = lmf->prev_lmf
1757 	/* This could be called before emit_push_lmf () */
1758 	if (!cfg->lmf_addr_var)
1759 		cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1760 	lmf_addr_reg = cfg->lmf_addr_var->dreg;
1762 	prev_lmf_reg = alloc_preg (cfg);
1763 	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
1764 	EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *   Emit a profiler enter/leave jit icall to 'func' with the current
 *   method as its single argument, when ENTER_LEAVE profiling is
 *   enabled. Inlined methods are skipped to avoid distorting results.
 */
1768 emit_instrumentation_call (MonoCompile *cfg, void *func)
1770 	MonoInst *iargs [1];
1773 	 * Avoid instrumenting inlined methods since it can
1774 	 * distort profiling results.
1776 	if (cfg->method != cfg->current_method)
1779 	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
1780 		EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
1781 		mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Pick the IR call opcode family for a call returning 'type':
 *   VOIDCALL / CALL / LCALL / RCALL / FCALL / VCALL, each in its
 *   plain, _REG (calli) or _MEMBASE (virtual) form. Enums loop back
 *   with their base type; generic insts loop back with their container
 *   class. Unknown types abort via g_error ().
 */
1786 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
1789 	type = mini_get_underlying_type (type);
1790 	switch (type->type) {
1791 	case MONO_TYPE_VOID:
1792 		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
1799 		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1803 	case MONO_TYPE_FNPTR:
1804 		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1805 	case MONO_TYPE_CLASS:
1806 	case MONO_TYPE_STRING:
1807 	case MONO_TYPE_OBJECT:
1808 	case MONO_TYPE_SZARRAY:
1809 	case MONO_TYPE_ARRAY:
1810 		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
1813 		return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
1816 		return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
1818 		return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1820 		return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
1821 	case MONO_TYPE_VALUETYPE:
1822 		if (type->data.klass->enumtype) {
1823 			type = mono_class_enum_basetype (type->data.klass);
1826 			return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1827 	case MONO_TYPE_TYPEDBYREF:
1828 		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1829 	case MONO_TYPE_GENERICINST:
1830 		type = &type->data.generic_class->container_class->byval_arg;
1833 	case MONO_TYPE_MVAR:
1835 		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
1837 		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1842 //XXX this ignores if t is byref
1843 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
1846 * target_type_is_incompatible:
1847 * @cfg: MonoCompile context
1849 * Check that the item @arg on the evaluation stack can be stored
1850 * in the target type (can be a local, or field, etc).
1851 * The cfg arg can be used to check if we need verification or just
1854 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 *   See the comment above this function: returns non-zero when the
 *   stack item 'arg' cannot be stored into a location of type 'target'.
 *   Byref targets accept STACK_MP (with lowered-class / primitive-size
 *   checks for gshared) and STACK_PTR; everything else is dispatched on
 *   the underlying simple type vs arg->type.
 */
1857 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1859 	MonoType *simple_type;
1862 	if (target->byref) {
1863 		/* FIXME: check that the pointed to types match */
1864 		if (arg->type == STACK_MP) {
1865 			/* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
1866 			MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
1867 			MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
1869 			/* if the target is native int& or same type */
1870 			if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
1873 			/* Both are primitive type byrefs and the source points to a larger type that the destination */
1874 			if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
1875 				mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
1879 		if (arg->type == STACK_PTR)
1884 	simple_type = mini_get_underlying_type (target);
1885 	switch (simple_type->type) {
1886 	case MONO_TYPE_VOID:
1894 		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1898 		/* STACK_MP is needed when setting pinned locals */
1899 		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1904 	case MONO_TYPE_FNPTR:
1906 		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1907 		 * in native int. (#688008).
1909 		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1912 	case MONO_TYPE_CLASS:
1913 	case MONO_TYPE_STRING:
1914 	case MONO_TYPE_OBJECT:
1915 	case MONO_TYPE_SZARRAY:
1916 	case MONO_TYPE_ARRAY:
1917 		if (arg->type != STACK_OBJ)
1919 		/* FIXME: check type compatibility */
1923 		if (arg->type != STACK_I8)
1927 		if (arg->type != cfg->r4_stack_type)
1931 		if (arg->type != STACK_R8)
1934 	case MONO_TYPE_VALUETYPE:
1935 		if (arg->type != STACK_VTYPE)
1937 		klass = mono_class_from_mono_type (simple_type);
1938 		if (klass != arg->klass)
1941 	case MONO_TYPE_TYPEDBYREF:
1942 		if (arg->type != STACK_VTYPE)
1944 		klass = mono_class_from_mono_type (simple_type);
1945 		if (klass != arg->klass)
1948 	case MONO_TYPE_GENERICINST:
1949 		if (mono_type_generic_inst_is_valuetype (simple_type)) {
1950 			MonoClass *target_class;
1951 			if (arg->type != STACK_VTYPE)
1953 			klass = mono_class_from_mono_type (simple_type);
1954 			target_class = mono_class_from_mono_type (target);
1955 			/* The second cases is needed when doing partial sharing */
1956 			if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
1960 			if (arg->type != STACK_OBJ)
1962 			/* FIXME: check type compatibility */
1966 	case MONO_TYPE_MVAR:
1967 		g_assert (cfg->gshared);
1968 		if (mini_type_var_is_vt (simple_type)) {
1969 			if (arg->type != STACK_VTYPE)
1972 			if (arg->type != STACK_OBJ)
1977 		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1983 * Prepare arguments for passing to a function call.
1984 * Return a non-zero value if the arguments can't be passed to the given
1986 * The type checks are not yet complete and some conversions may need
1987 * casts on 32 or 64 bit architectures.
1989 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 *   See the comment above: return non-zero when the stack types of
 *   'args' cannot be passed to 'sig' (per-parameter dispatch on the
 *   underlying simple type, byref parameters accepting STACK_MP or
 *   STACK_PTR, enums/generic insts looping back with their base type).
 */
1992 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1994 	MonoType *simple_type;
1998 		if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2002 	for (i = 0; i < sig->param_count; ++i) {
2003 		if (sig->params [i]->byref) {
2004 			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2008 		simple_type = mini_get_underlying_type (sig->params [i]);
2010 		switch (simple_type->type) {
2011 		case MONO_TYPE_VOID:
2020 			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2026 		case MONO_TYPE_FNPTR:
2027 			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2030 		case MONO_TYPE_CLASS:
2031 		case MONO_TYPE_STRING:
2032 		case MONO_TYPE_OBJECT:
2033 		case MONO_TYPE_SZARRAY:
2034 		case MONO_TYPE_ARRAY:
2035 			if (args [i]->type != STACK_OBJ)
2040 			if (args [i]->type != STACK_I8)
2044 			if (args [i]->type != cfg->r4_stack_type)
2048 			if (args [i]->type != STACK_R8)
2051 		case MONO_TYPE_VALUETYPE:
2052 			if (simple_type->data.klass->enumtype) {
2053 				simple_type = mono_class_enum_basetype (simple_type->data.klass);
2056 			if (args [i]->type != STACK_VTYPE)
2059 		case MONO_TYPE_TYPEDBYREF:
2060 			if (args [i]->type != STACK_VTYPE)
2063 		case MONO_TYPE_GENERICINST:
2064 			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2067 		case MONO_TYPE_MVAR:
2069 			if (args [i]->type != STACK_VTYPE)
2073 			g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Convert a *CALL_MEMBASE (virtual-dispatch) opcode to its direct
 *   *CALL counterpart (the return lines are elided). Asserts on any
 *   non-MEMBASE opcode.
 */
2081 callvirt_to_call (int opcode)
2084 	case OP_CALL_MEMBASE:
2086 	case OP_VOIDCALL_MEMBASE:
2088 	case OP_FCALL_MEMBASE:
2090 	case OP_RCALL_MEMBASE:
2092 	case OP_VCALL_MEMBASE:
2094 	case OP_LCALL_MEMBASE:
2097 	g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *   Convert a *CALL_MEMBASE opcode to its indirect *CALL_REG
 *   counterpart (call through a register instead of a vtable slot).
 *   Asserts on any non-MEMBASE opcode.
 */
2104 callvirt_to_call_reg (int opcode)
2107 	case OP_CALL_MEMBASE:
2109 	case OP_VOIDCALL_MEMBASE:
2110 		return OP_VOIDCALL_REG;
2111 	case OP_FCALL_MEMBASE:
2112 		return OP_FCALL_REG;
2113 	case OP_RCALL_MEMBASE:
2114 		return OP_RCALL_REG;
2115 	case OP_VCALL_MEMBASE:
2116 		return OP_VCALL_REG;
2117 	case OP_LCALL_MEMBASE:
2118 		return OP_LCALL_REG;
2120 	g_assert_not_reached ();
2126 /* Either METHOD or IMT_ARG needs to be set */
/* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Load the IMT identification argument (either a run-time imt_arg
 *   register copy or a METHODCONST for 'method') and attach it to
 *   'call'. The LLVM path records it in call->imt_arg_reg; the JIT path
 *   passes it in MONO_ARCH_IMT_REG. The two branches mirror each other
 *   under a COMPILE_LLVM (cfg) split.
 */
2128 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2132 	if (COMPILE_LLVM (cfg)) {
2134 			method_reg = alloc_preg (cfg);
2135 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2137 			MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2138 			method_reg = ins->dreg;
2142 		call->imt_arg_reg = method_reg;
2144 		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2149 		method_reg = alloc_preg (cfg);
2150 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2152 		MonoInst *ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2153 		method_reg = ins->dreg;
2156 	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from 'mp' describing a patch of 'type' at
 *   IL offset 'ip' targeting 'target' (field assignments for ip/type
 *   are elided here).
 */
2159 static MonoJumpInfo *
2160 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2162 	MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2166 	ji->data.target = target;
/*
 * mini_class_check_context_used:
 *   Thin wrapper around mono_class_check_context_used () (the guarding
 *   condition, presumably cfg->gshared, is elided — returns 0 otherwise).
 */
2172 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2175 		return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *   Thin wrapper around mono_method_check_context_used (), mirroring
 *   mini_class_check_context_used () above.
 */
2181 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2184 		return mono_method_check_context_used (method);
2190 * check_method_sharing:
2192 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * check_method_sharing:
 *   Decide whether a call to 'cmethod' must pass a vtable
 *   (*out_pass_vtable) and/or an mrgctx (*out_pass_mrgctx). A vtable is
 *   passed for sharable static/valuetype methods on generic classes
 *   without a method inst; an mrgctx is passed for methods with a
 *   method inst that are sharable (or gsharedvt-signature) — the two
 *   are mutually exclusive (asserted).
 */
2195 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2197 	gboolean pass_vtable = FALSE;
2198 	gboolean pass_mrgctx = FALSE;
2200 	if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2201 			(mono_class_is_ginst (cmethod->klass) || mono_class_is_gtd (cmethod->klass))) {
2202 		gboolean sharable = FALSE;
2204 		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2208 		 * Pass vtable iff target method might
2209 		 * be shared, which means that sharing
2210 		 * is enabled for its class and its
2211 		 * context is sharable (and it's not a
2214 		if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2218 	if (mini_method_get_context (cmethod) &&
2219 		mini_method_get_context (cmethod)->method_inst) {
2220 		g_assert (!pass_vtable);
2222 		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2225 			if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
2230 	if (out_pass_vtable)
2231 		*out_pass_vtable = pass_vtable;
2232 	if (out_pass_mrgctx)
2233 		*out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Allocate a MonoCallInst for a call with signature 'sig' and set up
 *   its arguments and return handling:
 *   - tail calls become OP_TAILCALL (with a profiler leave event first);
 *   - vtype returns either reuse cfg->vret_addr or allocate a temp
 *     addressed through a deferred OP_OUTARG_VTRETADDR (see comment in
 *     the body); other non-void returns get a fresh dreg;
 *   - on soft-float targets, R4 arguments are pre-converted via the
 *     mono_fload_r4_arg icall so the conversion cannot clobber the
 *     call-setup registers;
 *   - argument marshalling is delegated to the LLVM or arch backend.
 *   Returns the new call instruction (not yet added to a bblock).
 *   NOTE(review): the two `mini_type_is_vtype (sig_ret)` branches look
 *   identical only because their distinguishing guard lines are elided
 *   (the first arm handles the tail-call case).
 */
2236 inline static MonoCallInst *
2237 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2238 					 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
2242 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2250 			emit_instrumentation_call (cfg, mono_profiler_method_leave);
2252 		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2254 		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
2257 	call->signature = sig;
2258 	call->rgctx_reg = rgctx;
2259 	sig_ret = mini_get_underlying_type (sig->ret);
2261 	type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2264 		if (mini_type_is_vtype (sig_ret)) {
2265 			call->vret_var = cfg->vret_addr;
2266 			//g_assert_not_reached ();
2268 	} else if (mini_type_is_vtype (sig_ret)) {
2269 		MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2272 		temp->backend.is_pinvoke = sig->pinvoke;
2275 		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2276 		 * address of return value to increase optimization opportunities.
2277 		 * Before vtype decomposition, the dreg of the call ins itself represents the
2278 		 * fact the call modifies the return value. After decomposition, the call will
2279 		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2280 		 * will be transformed into an LDADDR.
2282 		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2283 		loada->dreg = alloc_preg (cfg);
2284 		loada->inst_p0 = temp;
2285 		/* We reference the call too since call->dreg could change during optimization */
2286 		loada->inst_p1 = call;
2287 		MONO_ADD_INS (cfg->cbb, loada);
2289 		call->inst.dreg = temp->dreg;
2291 		call->vret_var = loada;
2292 	} else if (!MONO_TYPE_IS_VOID (sig_ret))
2293 		call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
2295 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2296 	if (COMPILE_SOFT_FLOAT (cfg)) {
2302 		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2304 			MonoInst *in = call->args [i];
2306 			if (i >= sig->hasthis)
2307 				t = sig->params [i - sig->hasthis];
2309 				t = &mono_defaults.int_class->byval_arg;
2310 			t = mono_type_get_underlying_type (t);
2312 			if (!t->byref && t->type == MONO_TYPE_R4) {
2313 				MonoInst *iargs [1];
2317 				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2319 				/* The result will be in an int vreg */
2320 				call->args [i] = conv;
2326 	call->need_unbox_trampoline = unbox_trampoline;
2329 	if (COMPILE_LLVM (cfg))
2330 		mono_llvm_emit_call (cfg, call);
2332 		mono_arch_emit_call (cfg, call);
2334 	mono_arch_emit_call (cfg, call);
2337 	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2338 	cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the runtime-generic-context argument to 'call': pass
 *   rgctx_reg in MONO_ARCH_RGCTX_REG and mark both the call and the cfg
 *   as using the rgctx register.
 */
2344 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2346 		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2347 		cfg->uses_rgctx_reg = TRUE;
2348 		call->rgctx_reg = TRUE;
2350 		call->rgctx_arg_reg = rgctx_reg;
/*
 * mini_emit_calli:
 *   Emit an indirect call through 'addr' with signature 'sig'. The
 *   rgctx argument (if any) is copied to a fresh register before the
 *   call sequence so it survives argument setup. For pinvoke wrappers
 *   with check_pinvoke_callconv enabled, the stack pointer is captured
 *   before the call and compared after it, restoring SP and raising
 *   ExecutionEngineException on an imbalance (wrong calling convention).
 */
2355 mini_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2360 	gboolean check_sp = FALSE;
2362 	if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2363 		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2365 		if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
2370 		rgctx_reg = mono_alloc_preg (cfg);
2371 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2375 		if (!cfg->stack_inbalance_var)
2376 			cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2378 		MONO_INST_NEW (cfg, ins, OP_GET_SP);
2379 		ins->dreg = cfg->stack_inbalance_var->dreg;
2380 		MONO_ADD_INS (cfg->cbb, ins);
2383 	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2385 	call->inst.sreg1 = addr->dreg;
2388 		emit_imt_argument (cfg, call, NULL, imt_arg);
2390 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2395 		sp_reg = mono_alloc_preg (cfg);
2397 		MONO_INST_NEW (cfg, ins, OP_GET_SP);
2399 		MONO_ADD_INS (cfg->cbb, ins);
2401 		/* Restore the stack so we don't crash when throwing the exception */
2402 		MONO_INST_NEW (cfg, ins, OP_SET_SP);
2403 		ins->sreg1 = cfg->stack_inbalance_var->dreg;
2404 		MONO_ADD_INS (cfg->cbb, ins);
2406 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2407 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2411 		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2413 	return (MonoInst*)call;
2417 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit IR for a (possibly virtual, tail, remoting or rgctx-carrying)
 *   call to 'method'. Handles: string ctor signature fix-up, remoting
 *   wrappers, llvm-only virtual dispatch, delegate Invoke fast path,
 *   devirtualization of non-virtual/final methods, direct calls through
 *   a call_target register, and IMT/vtable-slot dispatch. Returns the
 *   call instruction.
 *
 *   Fix: the call_target branch used `flags &= !MONO_INST_HAS_METHOD`
 *   (logical NOT, i.e. `flags &= 0`), which wiped ALL instruction
 *   flags. The intent — clear only MONO_INST_HAS_METHOD, set a few
 *   lines above — requires the bitwise complement `~`.
 */
2420 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2421 							MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2423 #ifndef DISABLE_REMOTING
2424 	gboolean might_be_remote = FALSE;
2426 	gboolean virtual_ = this_ins != NULL;
2427 	gboolean enable_for_aot = TRUE;
2430 	MonoInst *call_target = NULL;
2432 	gboolean need_unbox_trampoline;
2435 		sig = mono_method_signature (method);
2437 	if (cfg->llvm_only && (mono_class_is_interface (method->klass)))
2438 		g_assert_not_reached ();
2441 		rgctx_reg = mono_alloc_preg (cfg);
2442 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2445 	if (method->string_ctor) {
2446 		/* Create the real signature */
2447 		/* FIXME: Cache these */
2448 		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2449 		ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2454 	context_used = mini_method_check_context_used (cfg, method);
2456 #ifndef DISABLE_REMOTING
2457 	might_be_remote = this_ins && sig->hasthis &&
2458 		(mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2459 		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2461 	if (might_be_remote && context_used) {
2464 		g_assert (cfg->gshared);
2466 		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2468 		return mini_emit_calli (cfg, sig, args, addr, NULL, NULL);
2472 	if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2473 		return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2475 	need_unbox_trampoline = method->klass == mono_defaults.object_class || mono_class_is_interface (method->klass);
2477 	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2479 #ifndef DISABLE_REMOTING
2480 	if (might_be_remote)
2481 		call->method = mono_marshal_get_remoting_invoke_with_check (method);
2484 		call->method = method;
2485 	call->inst.flags |= MONO_INST_HAS_METHOD;
2486 	call->inst.inst_left = this_ins;
2487 	call->tail_call = tail;
2490 		int vtable_reg, slot_reg, this_reg;
2493 		this_reg = this_ins->dreg;
2495 		if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2496 			MonoInst *dummy_use;
2498 			MONO_EMIT_NULL_CHECK (cfg, this_reg);
2500 			/* Make a call to delegate->invoke_impl */
2501 			call->inst.inst_basereg = this_reg;
2502 			call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2503 			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2505 			/* We must emit a dummy use here because the delegate trampoline will
2506 			replace the 'this' argument with the delegate target making this activation
2507 			no longer a root for the delegate.
2508 			This is an issue for delegates that target collectible code such as dynamic
2509 			methods of GC'able assemblies.
2511 			For a test case look into #667921.
2513 			FIXME: a dummy use is not the best way to do it as the local register allocator
2514 			will put it on a caller save register and spil it around the call.
2515 			Ideally, we would either put it on a callee save register or only do the store part.
2517 			EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2519 			return (MonoInst*)call;
2522 		if ((!cfg->compile_aot || enable_for_aot) &&
2523 			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2524 			 (MONO_METHOD_IS_FINAL (method) &&
2525 			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2526 			!(mono_class_is_marshalbyref (method->klass) && context_used)) {
2528 			 * the method is not virtual, we just need to ensure this is not null
2529 			 * and then we can call the method directly.
2531 #ifndef DISABLE_REMOTING
2532 			if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2534 				 * The check above ensures method is not gshared, this is needed since
2535 				 * gshared methods can't have wrappers.
2537 				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2541 			if (!method->string_ctor)
2542 				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2544 			call->inst.opcode = callvirt_to_call (call->inst.opcode);
2545 		} else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2547 			 * the method is virtual, but we can statically dispatch since either
2548 			 * it's class or the method itself are sealed.
2549 			 * But first we need to ensure it's not a null reference.
2551 			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2553 			call->inst.opcode = callvirt_to_call (call->inst.opcode);
2554 		} else if (call_target) {
2555 			vtable_reg = alloc_preg (cfg);
2556 			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2558 			call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2559 			call->inst.sreg1 = call_target->dreg;
/* Clear only the HAS_METHOD flag (was `&= !...`, which cleared every flag). */
2560 			call->inst.flags &= ~MONO_INST_HAS_METHOD;
2562 			vtable_reg = alloc_preg (cfg);
2563 			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2564 			if (mono_class_is_interface (method->klass)) {
2565 				guint32 imt_slot = mono_method_get_imt_slot (method);
2566 				emit_imt_argument (cfg, call, call->method, imt_arg);
2567 				slot_reg = vtable_reg;
2568 				offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2570 				slot_reg = vtable_reg;
2571 				offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2572 					((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2574 					g_assert (mono_method_signature (method)->generic_param_count);
2575 					emit_imt_argument (cfg, call, call->method, imt_arg);
2579 			call->inst.sreg1 = slot_reg;
2580 			call->inst.inst_offset = offset;
2581 			call->is_virtual = TRUE;
2585 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2588 		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2590 	return (MonoInst*)call;
2594 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2596 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG and
 * append the call instruction to the current basic block.
 */
2600 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* Plain call: the FALSE flags select the non-calli/non-virtual/non-tailcall
 * variant — confirm exact flag meaning against mono_emit_call_args (). */
2607 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2610 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2612 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall whose C entry point is FUNC, going through
 * the icall's registered wrapper.
 */
2616 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2618 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
/* NOTE(review): info is used without a NULL check — FUNC is assumed to be a
 * registered icall address. */
2622 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2626 * mono_emit_abs_call:
2628 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2630 inline static MonoInst*
2631 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2632 MonoMethodSignature *sig, MonoInst **args)
2634 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2638 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* look it up in cfg->abs_patches and substitute the real target address. */
/* Lazily create the patch table; keys and values are the MonoJumpInfo itself. */
2641 if (cfg->abs_patches == NULL)
2642 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2643 g_hash_table_insert (cfg->abs_patches, ji, ji)
2644 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so it is not treated as a real code address. */
2645 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2649 static MonoMethodSignature*
2650 sig_to_rgctx_sig (MonoMethodSignature *sig)
2652 // FIXME: memory allocation
2653 MonoMethodSignature *res;
2656 res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
2657 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
2658 res->param_count = sig->param_count + 1;
2659 for (i = 0; i < sig->param_count; ++i)
2660 res->params [i] = sig->params [i];
2661 res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
2665 /* Make an indirect call to FSIG passing an additional argument */
2667 emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
2669 MonoMethodSignature *csig;
2670 MonoInst *args_buf [16];
2672 int i, pindex, tmp_reg;
2674 /* Make a call with an rgctx/extra arg */
/* Use the stack buffer when the extended argument list fits, otherwise
 * allocate from the compile mempool. */
2675 if (fsig->param_count + 2 < 16)
2678 args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
/* Copy the receiver (if any) and the original arguments. */
2681 args [pindex ++] = orig_args [0];
2682 for (i = 0; i < fsig->param_count; ++i)
2683 args [pindex ++] = orig_args [fsig->hasthis + i];
/* Append the extra argument from ARG_REG as the last parameter. */
2684 tmp_reg = alloc_preg (cfg);
2685 EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
/* Call through a signature extended with one trailing pointer argument. */
2686 csig = sig_to_rgctx_sig (fsig);
2687 return mini_emit_calli (cfg, csig, args, call_target, NULL, NULL);
2690 /* Emit an indirect call to the function descriptor ADDR */
2692 emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
2694 int addr_reg, arg_reg;
2695 MonoInst *call_target;
/* Only valid in llvm-only mode, where all indirect calls go through
 * <addr, arg> function descriptors. */
2697 g_assert (cfg->llvm_only);
2700 * addr points to a <addr, arg> pair, load both of them, and
2701 * make a call to addr, passing arg as an extra arg.
2703 addr_reg = alloc_preg (cfg);
2704 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
2705 arg_reg = alloc_preg (cfg);
2706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
2708 return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Return whether icalls can be called directly, bypassing their managed
 * wrappers. Disabled for mixed-mode LLVM, when sdb sequence points are
 * generated, or when direct icalls are explicitly turned off.
 */
2712 direct_icalls_enabled (MonoCompile *cfg)
2716 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2718 if (cfg->compile_llvm && !cfg->llvm_only)
2721 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO, inlining its wrapper when the
 * icall cannot raise and direct icalls are enabled.
 */
2727 mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
2730 * Call the jit icall without a wrapper if possible.
2731 * The wrapper is needed for the following reasons:
2732 * - to handle exceptions thrown using mono_raise_exception () from the
2733 * icall function. The EH code needs the lmf frame pushed by the
2734 * wrapper to be able to unwind back to managed code.
2735 * - to be able to do stack walks for asynchronously suspended
2736 * threads when debugging.
2738 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create the wrapper; the barrier below publishes it fully
 * initialized to other compiling threads. */
2742 if (!info->wrapper_method) {
2743 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2744 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
2746 mono_memory_barrier ();
2750 * Inline the wrapper method, which is basically a call to the C icall, and
2751 * an exception check.
2753 costs = inline_method (cfg, info->wrapper_method, NULL,
2754 args, NULL, il_offset, TRUE);
2755 g_assert (costs > 0);
2756 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* Fallback: call through the registered wrapper. */
2760 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a call when the callee may return a sub-register-sized
 * integer with uninitialized upper bits (pinvoke or LLVM return conventions).
 */
2765 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2767 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2768 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2772 * Native code might return non register sized integers
2773 * without initializing the upper bits.
/* Pick the widening conversion matching the return type's load opcode. */
2775 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2776 case OP_LOADI1_MEMBASE:
2777 widen_op = OP_ICONV_TO_I1;
2779 case OP_LOADU1_MEMBASE:
2780 widen_op = OP_ICONV_TO_U1;
2782 case OP_LOADI2_MEMBASE:
2783 widen_op = OP_ICONV_TO_I2;
2785 case OP_LOADU2_MEMBASE:
2786 widen_op = OP_ICONV_TO_U2;
/* Replace the result with the widened value, preserving the stack type. */
2792 if (widen_op != -1) {
2793 int dreg = alloc_preg (cfg);
2796 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2797 widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 *   Emit IR which throws a MethodAccessException for CALLER accessing CALLEE
 * at runtime.
 */
2808 emit_method_access_failure (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
2810 MonoInst *args [16];
/* Load both methods (through the rgctx when shared) and call the throw icall. */
2812 args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (caller), caller, MONO_RGCTX_INFO_METHOD);
2813 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (callee), callee, MONO_RGCTX_INFO_METHOD);
2815 mono_emit_jit_icall (cfg, mono_throw_method_access, args);
2819 mini_get_memcpy_method (void)
2821 static MonoMethod *memcpy_method = NULL;
2822 if (!memcpy_method) {
2823 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2825 g_error ("Old corlib found. Install a new one");
2827 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Set one bit in WB_BITMAP for every pointer-sized slot of KLASS (at byte
 * OFFSET from the start) which holds a GC reference, recursing into embedded
 * value types with references. Static fields are skipped.
 */
2831 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2833 MonoClassField *field;
2834 gpointer iter = NULL;
2836 while ((field = mono_class_get_fields (klass, &iter))) {
2839 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the object header; strip it. */
2841 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2842 if (mini_type_is_reference (mono_field_get_type (field))) {
/* References must be pointer aligned for the bitmap to be valid. */
2843 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2844 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2846 MonoClass *field_class = mono_class_from_mono_type (field->type);
2847 if (field_class->has_references)
2848 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * mini_emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Prefers the
 * backend's dedicated card-table opcode, falls back to inline card marking,
 * and finally to calling the GC's write barrier method.
 */
2854 mini_emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2856 int card_table_shift_bits;
2857 gpointer card_table_mask;
2859 MonoInst *dummy_use;
2860 int nursery_shift_bits;
2861 size_t nursery_size;
2863 if (!cfg->gen_write_barriers)
2866 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2868 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Fastest path: a single backend opcode does the whole barrier. */
2870 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2873 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2874 wbarrier->sreg1 = ptr->dreg;
2875 wbarrier->sreg2 = value->dreg;
2876 MONO_ADD_INS (cfg->cbb, wbarrier);
2877 } else if (card_table) {
2878 int offset_reg = alloc_preg (cfg);
2883 * We emit a fast light weight write barrier. This always marks cards as in the concurrent
2884 * collector case, so, for the serial collector, it might slightly slow down nursery
2885 * collections. We also expect that the host system and the target system have the same card
2886 * table configuration, which is the case if they have the same pointer size.
/* card index = ptr >> shift, optionally masked. */
2889 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2890 if (card_table_mask)
2891 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2893 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2894 * IMM's larger than 32bits.
2896 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
2897 card_reg = ins->dreg;
/* Mark the card: card_table [index] = 1. */
2899 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2900 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided write barrier method. */
2902 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2903 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier. */
2906 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mini_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled copy of KLASS (SIZE bytes, ALIGN alignment) from
 * iargs [1] to iargs [0], emitting write barriers only for the pointer slots
 * that hold references. Returns FALSE when the copy cannot be intrinsified
 * (too large or under-aligned) — presumably the caller then falls back to a
 * generic copy; confirm at call sites.
 */
2910 mini_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2912 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2913 unsigned need_wb = 0;
2918 /*types with references can't have alignment smaller than sizeof(void*) */
2919 if (align < SIZEOF_VOID_P)
/* We don't unroll copies larger than 5 pointer words. */
2922 if (size > 5 * SIZEOF_VOID_P)
/* Compute which pointer slots need a write barrier. */
2925 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2927 destreg = iargs [0]->dreg;
2928 srcreg = iargs [1]->dreg;
2931 dest_ptr_reg = alloc_preg (cfg);
2932 tmp_reg = alloc_preg (cfg);
2935 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy pointer-sized words, barrier-ing the slots flagged in need_wb. */
2937 while (size >= SIZEOF_VOID_P) {
2938 MonoInst *load_inst;
2939 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
2940 load_inst->dreg = tmp_reg;
2941 load_inst->inst_basereg = srcreg;
2942 load_inst->inst_offset = offset;
2943 MONO_ADD_INS (cfg->cbb, load_inst);
2945 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2948 mini_emit_write_barrier (cfg, iargs [0], load_inst);
2950 offset += SIZEOF_VOID_P;
2951 size -= SIZEOF_VOID_P;
2954 /*tmp += sizeof (void*)*/
2955 if (size >= SIZEOF_VOID_P) {
2956 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2957 MONO_ADD_INS (cfg->cbb, iargs [0]);
2961 /* Those cannot be references since size < sizeof (void*) */
2963 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2964 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2970 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2971 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2977 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2978 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2987 * Emit code to copy a valuetype of type @klass whose address is stored in
2988 * @src->dreg to memory whose address is stored at @dest->dreg.
2991 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2993 MonoInst *iargs [4];
2996 MonoMethod *memcpy_method;
2997 MonoInst *size_ins = NULL;
2998 MonoInst *memcpy_ins = NULL;
3002 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3005 * This check breaks with spilled vars... need to handle it during verification anyway.
3006 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* For gsharedvt the size and memcpy helper are only known at runtime. */
3009 if (mini_is_gsharedvt_klass (klass)) {
3011 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3012 memcpy_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3016 n = mono_class_native_size (klass, &align);
3018 n = mono_class_value_size (klass, &align);
3021 align = SIZEOF_VOID_P;
3022 /* if native is true there should be no references in the struct */
3023 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3024 /* Avoid barriers when storing to the stack */
3025 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3026 (dest->opcode == OP_LDADDR))) {
3032 context_used = mini_class_check_context_used (cfg, klass);
3034 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3035 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mini_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
/* Barrier-aware copy via the value-copy icalls. */
3037 } else if (size_ins || align < SIZEOF_VOID_P) {
3039 iargs [2] = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3041 iargs [2] = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3042 if (!cfg->compile_aot)
3043 mono_class_compute_gc_descriptor (klass);
3046 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3048 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3050 /* We don't unroll more than 5 stores to avoid code bloat. */
3051 /*This is harmless and simplify mono_gc_get_range_copy_func */
/* Round the size up to a multiple of the pointer size. */
3052 n += (SIZEOF_VOID_P - 1);
3053 n &= ~(SIZEOF_VOID_P - 1);
3055 EMIT_NEW_ICONST (cfg, iargs [2], n);
3056 mono_emit_jit_icall (cfg, mono_gc_get_range_copy_func (), iargs);
/* No write barriers needed: plain memcpy, inlined when small enough. */
3061 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3062 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3063 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3068 iargs [2] = size_ins;
3070 EMIT_NEW_ICONST (cfg, iargs [2], n);
3072 memcpy_method = mini_get_memcpy_method ();
3074 mini_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3076 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3081 mini_get_memset_method (void)
3083 static MonoMethod *memset_method = NULL;
3084 if (!memset_method) {
3085 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3087 g_error ("Old corlib found. Install a new one");
3089 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize the valuetype KLASS at the address in DEST,
 * using the gsharedvt bzero helper, an inline memset, or the managed memset
 * method depending on what is known at compile time.
 */
3093 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3095 MonoInst *iargs [3];
3098 MonoMethod *memset_method;
3099 MonoInst *size_ins = NULL;
3100 MonoInst *bzero_ins = NULL;
3101 static MonoMethod *bzero_method;
3103 /* FIXME: Optimize this for the case when dest is an LDADDR */
3104 mono_class_init (klass);
/* gsharedvt: size is only known at runtime, call the bzero helper. */
3105 if (mini_is_gsharedvt_klass (klass)) {
3106 size_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3107 bzero_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3109 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3110 g_assert (bzero_method);
3112 iargs [1] = size_ins;
3113 mini_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3117 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3119 n = mono_class_value_size (klass, &align);
/* Small types are zeroed inline; larger ones via the managed memset. */
3121 if (n <= sizeof (gpointer) * 8) {
3122 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3125 memset_method = mini_get_memset_method ();
3127 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3128 EMIT_NEW_ICONST (cfg, iargs [2], n);
3129 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3136 * Emit IR to return either the this pointer for instance method,
3137 * or the mrgctx for static methods.
3140 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3142 MonoInst *this_ins = NULL;
3144 g_assert (cfg->gshared);
/* Instance methods on reference types carry the rgctx in 'this'. */
3146 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3147 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3148 !method->klass->valuetype)
3149 EMIT_NEW_VARLOAD (cfg, this_ins, cfg->this_arg, &mono_defaults.object_class->byval_arg);
/* Method-inflated methods receive an explicit mrgctx argument. */
3151 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3152 MonoInst *mrgctx_loc, *mrgctx_var;
3154 g_assert (!this_ins);
3155 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3157 mrgctx_loc = mono_get_vtable_var (cfg);
3158 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3161 } else if (MONO_CLASS_IS_INTERFACE (cfg->method->klass)) {
3162 MonoInst *mrgctx_loc, *mrgctx_var;
3164 /* Default interface methods need an mrgctx since the vtable at runtime points at an implementing class */
3165 mrgctx_loc = mono_get_vtable_var (cfg);
3166 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3168 g_assert (mono_method_needs_static_rgctx_invoke (cfg->method, TRUE));
/* Static methods / valuetype methods receive the vtable (or mrgctx). */
3171 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3172 MonoInst *vtable_loc, *vtable_var;
3174 g_assert (!this_ins);
3176 vtable_loc = mono_get_vtable_var (cfg);
3177 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* When an mrgctx was passed, dereference it to get the class vtable. */
3179 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3180 MonoInst *mrgctx_var = vtable_var;
3183 vtable_reg = alloc_preg (cfg);
3184 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3185 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable from 'this'. */
3193 vtable_reg = alloc_preg (cfg);
3194 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3199 static MonoJumpInfoRgctxEntry *
3200 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3202 MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3203 res->method = method;
3204 res->in_mrgctx = in_mrgctx;
3205 res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3206 res->data->type = patch_type;
3207 res->data->data.target = patch_data;
3208 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit an inline fast path which walks the rgctx slot arrays and falls back
 * to the mono_fill_{method,class}_rgctx icalls when a slot (or array) is
 * still NULL.
 */
3213 static inline MonoInst*
3214 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3216 MonoInst *args [16];
3219 // FIXME: No fastpath since the slot is not a compile time constant
3221 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3222 if (entry->in_mrgctx)
3223 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3225 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3229 * FIXME: This can be called during decompose, which is a problem since it creates
3231 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3233 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3235 MonoBasicBlock *is_null_bb, *end_bb;
3236 MonoInst *res, *ins, *call;
3239 slot = mini_get_rgctx_entry_slot (entry);
/* Decode the slot into mrgctx-vs-rgctx, nesting depth and array index. */
3241 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3242 index = MONO_RGCTX_SLOT_INDEX (slot);
3244 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3245 for (depth = 0; ; ++depth) {
3246 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3248 if (index < size - 1)
3253 NEW_BBLOCK (cfg, end_bb);
3254 NEW_BBLOCK (cfg, is_null_bb);
3257 rgctx_reg = rgctx->dreg;
3259 rgctx_reg = alloc_preg (cfg);
3261 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3262 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3263 NEW_BBLOCK (cfg, is_null_bb);
3265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3266 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk DEPTH levels of chained slot arrays, bailing out on any NULL link. */
3269 for (i = 0; i < depth; ++i) {
3270 int array_reg = alloc_preg (cfg);
3272 /* load ptr to next array */
3273 if (mrgctx && i == 0)
3274 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3277 rgctx_reg = array_reg;
3278 /* is the ptr null? */
3279 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3280 /* if yes, jump to actual trampoline */
3281 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3285 val_reg = alloc_preg (cfg);
3286 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3287 /* is the slot null? */
3288 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3289 /* if yes, jump to actual trampoline */
3290 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path hit: copy the slot value into the result register. */
3293 res_reg = alloc_preg (cfg);
3294 MONO_INST_NEW (cfg, ins, OP_MOVE);
3295 ins->dreg = res_reg;
3296 ins->sreg1 = val_reg;
3297 MONO_ADD_INS (cfg->cbb, ins);
3299 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: let the runtime fill the slot, then join the fast path. */
3302 MONO_START_BB (cfg, is_null_bb);
3304 EMIT_NEW_ICONST (cfg, args [1], index);
3306 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3308 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3309 MONO_INST_NEW (cfg, ins, OP_MOVE);
3310 ins->dreg = res_reg;
3311 ins->sreg1 = call->dreg;
3312 MONO_ADD_INS (cfg->cbb, ins);
3313 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3315 MONO_START_BB (cfg, end_bb);
3324 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3327 static inline MonoInst*
3328 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
/* Either inline the fetch, or call the lazy-fetch trampoline. */
3331 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3333 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * mini_emit_get_rgctx_klass:
 *
 *   Emit IR to load the rgctx entry of type RGCTX_TYPE describing KLASS.
 */
3337 mini_emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3338 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3340 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3341 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3343 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the rgctx entry of type RGCTX_TYPE describing SIG.
 */
3347 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3348 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3350 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3351 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3353 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load the rgctx entry of type RGCTX_TYPE for the gsharedvt call
 * described by SIG/CMETHOD.
 */
3357 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3358 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3360 MonoJumpInfoGSharedVtCall *call_info;
3361 MonoJumpInfoRgctxEntry *entry;
3364 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3365 call_info->sig = sig;
3366 call_info->method = cmethod;
3368 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3369 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3371 return emit_rgctx_fetch (cfg, rgctx, entry);
3375 * emit_get_rgctx_virt_method:
3377 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3380 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3381 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3383 MonoJumpInfoVirtMethod *info;
3384 MonoJumpInfoRgctxEntry *entry;
3387 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3388 info->klass = klass;
3389 info->method = virt_method;
3391 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3392 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3394 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the gsharedvt info for CMETHOD described by INFO.
 */
3398 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3399 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3401 MonoJumpInfoRgctxEntry *entry;
3404 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3405 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3407 return emit_rgctx_fetch (cfg, rgctx, entry);
3411 * emit_get_rgctx_method:
3413 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3414 * normal constants, else emit a load from the rgctx.
3417 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3418 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared code: the method is a compile-time constant. */
3420 if (!context_used) {
3423 switch (rgctx_type) {
3424 case MONO_RGCTX_INFO_METHOD:
3425 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3427 case MONO_RGCTX_INFO_METHOD_RGCTX:
3428 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3431 g_assert_not_reached ();
/* Shared code: fetch the method through the rgctx. */
3434 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3435 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3437 return emit_rgctx_fetch (cfg, rgctx, entry);
3442 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3443 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3445 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3446 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3448 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt info entry for DATA/RGCTX_TYPE, reusing
 * an existing entry when possible and growing the entry table on demand.
 */
3452 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3454 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3455 MonoRuntimeGenericContextInfoTemplate *template_;
/* Look for an existing matching entry (LOCAL_OFFSET entries are never shared). */
3460 for (i = 0; i < info->num_entries; ++i) {
3461 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3463 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the table when full (doubling, starting at 16). */
3467 if (info->num_entries == info->count_entries) {
3468 MonoRuntimeGenericContextInfoTemplate *new_entries;
3469 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3471 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3473 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3474 info->entries = new_entries;
3475 info->count_entries = new_count_entries;
/* Append the new entry. */
3478 idx = info->num_entries;
3479 template_ = &info->entries [idx];
3480 template_->info_type = rgctx_type;
3481 template_->data = data;
3483 info->num_entries ++;
3489 * emit_get_gsharedvt_info:
3491 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3494 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3499 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3500 /* Load info->entries [idx] */
3501 dreg = alloc_preg (cfg);
3502 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3508 mini_emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3510 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_class_init:
 *
 *   Emit IR which runs the class initializer of KLASS if it has not run yet.
 */
3514 * On return the caller must check @klass for load errors.
3517 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3519 MonoInst *vtable_arg;
3522 context_used = mini_class_check_context_used (cfg, klass);
/* Obtain the vtable, through the rgctx when the class is shared. */
3525 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
3526 klass, MONO_RGCTX_INFO_VTABLE);
3528 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3532 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3535 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3539 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3540 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3542 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3543 ins->sreg1 = vtable_arg->dreg;
3544 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: inline "if (!vtable->initialized) mono_generic_class_init (vtable)". */
3547 MonoBasicBlock *inited_bb;
3548 MonoInst *args [16];
3550 inited_reg = alloc_ireg (cfg);
3552 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, inited_reg, vtable_arg->dreg, MONO_STRUCT_OFFSET (MonoVTable, initialized));
3554 NEW_BBLOCK (cfg, inited_bb);
3556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3559 args [0] = vtable_arg;
3560 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3562 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP when sequence points are enabled and
 * we are compiling METHOD itself (not an inlined callee).
 */
3567 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3571 if (cfg->gen_seq_points && cfg->method == method) {
3572 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3574 ins->flags |= MONO_INST_NONEMPTY_STACK;
3575 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source and
 * destination classes of a cast into TLS so a failing cast can produce a
 * detailed error message.
 */
3580 mini_save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3582 if (mini_get_debug_options ()->better_cast_details) {
3583 int vtable_reg = alloc_preg (cfg);
3584 int klass_reg = alloc_preg (cfg);
3585 MonoBasicBlock *is_null_bb = NULL;
3587 int to_klass_reg, context_used;
/* Optionally skip the bookkeeping for null objects. */
3590 NEW_BBLOCK (cfg, is_null_bb);
3592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3593 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3596 tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
/* NOTE(review): stray "." after the \n in this message — looks like a typo. */
3598 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
/* Record the object's class as the cast source. */
3602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3605 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record KLASS as the cast destination (through the rgctx when shared). */
3607 context_used = mini_class_check_context_used (cfg, klass);
3609 MonoInst *class_ins;
3611 class_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3612 to_klass_reg = class_ins->dreg;
3614 to_klass_reg = alloc_preg (cfg);
3615 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3617 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3620 MONO_START_BB (cfg, is_null_bb);
/*
 * mini_reset_cast_details:
 *
 *   Clear the TLS cast-details state recorded by mini_save_cast_details ().
 */
3625 mini_reset_cast_details (MonoCompile *cfg)
3627 /* Reset the variables holding the cast details */
3628 if (mini_get_debug_options ()->better_cast_details) {
3629 MonoInst *tls_get = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
3630 /* It is enough to reset the from field */
3631 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which throws ArrayTypeMismatchException unless OBJ is exactly an
 * instance of ARRAY_CLASS (vtable/class identity compare, not a subtype test).
 */
3636 * On return the caller must check @array_class for load errors
3639 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3641 int vtable_reg = alloc_preg (cfg);
3644 context_used = mini_class_check_context_used (cfg, array_class);
3646 mini_save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also performs the implicit null check on OBJ. */
3648 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Domain-shared code cannot bake in a vtable; compare the class instead. */
3650 if (cfg->opt & MONO_OPT_SHARED) {
3651 int class_reg = alloc_preg (cfg);
3654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3655 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3656 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
3657 } else if (context_used) {
3658 MonoInst *vtable_ins;
3660 vtable_ins = mini_emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3661 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3663 if (cfg->compile_aot) {
3667 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3669 vt_reg = alloc_preg (cfg);
3670 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3671 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* JIT: the vtable address can be compared as an immediate. */
3674 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3680 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3682 mini_reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () for @val. With a non-zero
 * @context_used, the method address comes from the RGCTX (shared generic
 * code); otherwise a direct (possibly vtable-assisted) call is emitted.
 * Returns the MonoInst representing the call result.
 */
3686 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3687 * generic code is generated.
3690 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3692 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3695 MonoInst *rgctx, *addr;
3697 /* FIXME: What if the class is shared? We might not
3698 have to get the address of the method from the
/* Shared-generic path: resolve the Unbox method's code address at runtime. */
3700 addr = emit_get_rgctx_method (cfg, context_used, method,
3701 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3702 if (cfg->llvm_only) {
/* llvm-only mode calls through a signature-registered indirect call. */
3703 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
3704 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3706 rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3708 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: a direct call, optionally passing the vtable as the
 * extra rgctx argument when method sharing requires it. */
3711 gboolean pass_vtable, pass_mrgctx;
3712 MonoInst *rgctx_arg = NULL;
3714 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3715 g_assert (!pass_mrgctx);
3718 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3721 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3724 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for sp [0]: verify the object's runtime element
 * class matches @klass (throwing InvalidCastException otherwise), then
 * return the address of the unboxed payload — the object pointer advanced
 * past the MonoObject header.
 */
3729 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3733 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3734 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3735 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3736 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3738 obj_reg = sp [0]->dreg;
/* _FAULT load doubles as the null check on the boxed object. */
3739 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3740 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3742 /* FIXME: generics */
3743 g_assert (klass->rank == 0);
/* An array can never unbox to a value type: rank must be 0. */
3746 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3747 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* The check compares element classes so e.g. enums unbox against their
 * underlying type's element class. */
3749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3750 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared-generic path: the expected element class comes from the RGCTX. */
3753 MonoInst *element_class;
3755 /* This assertion is from the unboxcast insn */
3756 g_assert (klass->rank == 0);
3758 element_class = mini_emit_get_rgctx_klass (cfg, context_used,
3759 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3761 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3762 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: compare against the compile-time element class, with
 * cast details recorded for better diagnostics. */
3764 mini_save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3765 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3766 mini_reset_cast_details (cfg);
/* Result: pointer just past the object header, i.e. the value payload. */
3769 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3770 MONO_ADD_INS (cfg->cbb, add);
3771 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit the unbox sequence for a gsharedvt (variable-size generic shared)
 * class. Because the instantiation is only known at runtime, branch on the
 * class's box type (vtype / reference / nullable) fetched from the gsharedvt
 * info, and produce in 'addr_reg' either the address of the boxed payload or
 * the address of a temporary holding the value; the result is loaded from
 * that address at the end.
 */
3778 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3780 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3781 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3785 klass_inst = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Cast check done out-of-line via an icall (obj, klass). */
3791 args [1] = klass_inst;
3794 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3796 NEW_BBLOCK (cfg, is_ref_bb);
3797 NEW_BBLOCK (cfg, is_nullable_bb);
3798 NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on the runtime box type of the instantiation. */
3799 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3800 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
3801 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
3804 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3806 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3807 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough = vtype case: payload sits just past the object header. */
3811 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3812 MONO_ADD_INS (cfg->cbb, addr);
3814 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3817 MONO_START_BB (cfg, is_ref_bb);
3819 /* Save the ref to a temporary */
3820 dreg = alloc_ireg (cfg);
3821 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
/* Force all three paths to deliver their result in the same vreg. */
3822 addr->dreg = addr_reg;
3823 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3824 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3827 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through a hand-built signature,
 * since the concrete method cannot be constructed at JIT time. */
3830 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3831 MonoInst *unbox_call;
3832 MonoMethodSignature *unbox_sig;
3834 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3835 unbox_sig->ret = &klass->byval_arg;
3836 unbox_sig->param_count = 1;
3837 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3840 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
3842 unbox_call = mini_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3844 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3845 addr->dreg = addr_reg;
3848 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3851 MONO_START_BB (cfg, end_bb);
/* Load the unboxed value from whichever address the taken path produced. */
3854 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/*
 * handle_alloc:
 *
 *   Emit IR that allocates a new object of @klass. @for_box indicates the
 * allocation is for a box operation. Chooses between a managed allocator
 * (inlined GC fast path), the generic ves_icall_object_new* icalls, and an
 * mscorlib-specialized AOT helper, depending on sharing mode, AOT status
 * and GC capabilities.
 */
3860 * Returns NULL and set the cfg exception on error.
3863 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3865 MonoInst *iargs [2];
/* Shared-generic branch (context_used path — enclosing condition elided).
 * NOTE: the inner 'iargs' below shadows the outer declaration. */
3870 MonoRgctxInfoType rgctx_info;
3871 MonoInst *iargs [2];
3872 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
3874 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* MONO_OPT_SHARED code passes a MonoClass; otherwise a MonoVTable. */
3876 if (cfg->opt & MONO_OPT_SHARED)
3877 rgctx_info = MONO_RGCTX_INFO_KLASS;
3879 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3880 data = mini_emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3882 if (cfg->opt & MONO_OPT_SHARED) {
3883 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3885 alloc_ftn = ves_icall_object_new;
3888 alloc_ftn = ves_icall_object_new_specific;
/* Managed allocator fast path requires a compile-time-known size. */
3891 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3892 if (known_instance_size) {
3893 int size = mono_class_instance_size (klass);
/* Sanity: every object is at least a MonoObject header. */
3894 if (size < sizeof (MonoObject))
3895 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3897 EMIT_NEW_ICONST (cfg, iargs [1], size);
3899 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3902 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared branch: klass is fully known at compile time. */
3905 if (cfg->opt & MONO_OPT_SHARED) {
3906 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3907 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3909 alloc_ftn = ves_icall_object_new;
3910 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !mono_class_is_ginst (klass)) {
3911 /* This happens often in argument checking code, eg. throw new FooException... */
3912 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3913 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3914 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3916 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3917 MonoMethod *managed_alloc = NULL;
/* A NULL vtable means the class failed to load: report a TypeLoadException. */
3921 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3922 cfg->exception_ptr = klass;
3926 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3928 if (managed_alloc) {
3929 int size = mono_class_instance_size (klass);
3930 if (size < sizeof (MonoObject))
3931 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
3933 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3934 EMIT_NEW_ICONST (cfg, iargs [1], size);
3935 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3937 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: allocator wants the size in machine words, not bytes. */
3939 guint32 lw = vtable->klass->instance_size;
3940 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3941 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3942 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3945 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3949 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit the box sequence for value @val of @klass. Special-cases:
 * Nullable<T> (delegates to Nullable<T>.Box ()), gsharedvt classes
 * (runtime dispatch on the box type), and the plain case (allocate, then
 * store the value past the object header).
 */
3953 * Returns NULL and set the cfg exception on error.
3956 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3958 MonoInst *alloc, *ins;
3960 if (mono_class_is_nullable (klass)) {
3961 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3964 if (cfg->llvm_only && cfg->gsharedvt) {
3965 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3966 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3967 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
3969 /* FIXME: What if the class is shared? We might not
3970 have to get the method address from the RGCTX. */
3971 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3972 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3973 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->method, context_used);
3975 return mini_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared nullable path: direct call, possibly with the vtable as the
 * extra sharing argument. */
3978 gboolean pass_vtable, pass_mrgctx;
3979 MonoInst *rgctx_arg = NULL;
3981 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3982 g_assert (!pass_mrgctx);
3985 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3988 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3991 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the instantiation's box behavior is only known at runtime. */
3995 if (mini_is_gsharedvt_klass (klass)) {
3996 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3997 MonoInst *res, *is_ref, *src_var, *addr;
4000 dreg = alloc_ireg (cfg);
4002 NEW_BBLOCK (cfg, is_ref_bb);
4003 NEW_BBLOCK (cfg, is_nullable_bb);
4004 NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on the runtime box type, as in handle_unbox_gsharedvt. */
4005 is_ref = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4006 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4007 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4009 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4010 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough = vtype case: allocate and copy the value in. */
4013 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4016 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4017 ins->opcode = OP_STOREV_MEMBASE;
4019 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4020 res->type = STACK_OBJ;
4022 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4025 MONO_START_BB (cfg, is_ref_bb);
4027 /* val is a vtype, so has to load the value manually */
4028 src_var = get_vreg_to_inst (cfg, val->dreg);
4030 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4031 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
/* Reference case: boxing a ref is the identity, so just reload it. */
4032 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4033 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4036 MONO_START_BB (cfg, is_nullable_bb);
4039 MonoInst *addr = mini_emit_get_gsharedvt_info_klass (cfg, klass,
4040 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4042 MonoMethodSignature *box_sig;
4045 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4046 * construct that method at JIT time, so have to do things by hand.
4048 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4049 box_sig->ret = &mono_defaults.object_class->byval_arg;
4050 box_sig->param_count = 1;
4051 box_sig->params [0] = &klass->byval_arg;
4054 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4056 box_call = mini_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4057 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4058 res->type = STACK_OBJ;
4062 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4064 MONO_START_BB (cfg, end_bb);
/* Plain (non-nullable, non-gsharedvt) box: allocate + store the payload. */
4068 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4072 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Lazily-built whitelist of corlib class names whose icalls are known not to
 * raise exceptions and can therefore be called directly. */
4077 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether @cmethod (an icall) can be invoked directly, bypassing
 * the icall wrapper. Only a small whitelist of corlib classes qualifies.
 */
4080 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4082 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4083 if (!direct_icalls_enabled (cfg))
4087 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4088 * Whitelist a few icalls for now.
4090 if (!direct_icall_type_hash) {
4091 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4093 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4094 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4095 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4096 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible
 * to other threads; the table is read-only afterwards, so lookups below
 * need no locking. */
4097 mono_memory_barrier ();
4098 direct_icall_type_hash = h;
4101 if (cmethod->klass == mono_defaults.math_class)
4103 /* No locking needed */
4104 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether @cmethod inspects its caller via a stack walk (here:
 * System.Type.GetType), so the JIT must not optimize the call in ways
 * that would hide the caller frame.
 */
4110 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4112 if (cmethod->klass == mono_defaults.systemtype_class) {
4113 if (!strcmp (cmethod->name, "GetType"))
/*
 * handle_enum_has_flag:
 *
 *   Emit an inline implementation of Enum.HasFlag (): load the enum value,
 * AND it with the flag, and compare the result back against the flag —
 * (this & flag) == flag. Uses 32- or 64-bit opcodes depending on the enum's
 * underlying type.
 */
4119 static G_GNUC_UNUSED MonoInst*
4120 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4122 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4123 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
/* Classify the underlying type as 32-bit or 64-bit (is_i4 — the switch
 * arms are elided in this fragment). */
4126 switch (enum_type->type) {
4129 #if SIZEOF_REGISTER == 8
4141 MonoInst *load, *and_, *cmp, *ceq;
4142 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4143 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4144 int dest_reg = alloc_ireg (cfg);
4146 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4147 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4148 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4149 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4151 ceq->type = STACK_I4;
/* Decompose composite opcodes for back ends that need simpler forms. */
4154 load = mono_decompose_opcode (cfg, load);
4155 and_ = mono_decompose_opcode (cfg, and_);
4156 cmp = mono_decompose_opcode (cfg, cmp);
4157 ceq = mono_decompose_opcode (cfg, ceq);
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined version of the delegate constructor for delegate class
 * @klass bound to @method with receiver @target. Fills in the MonoDelegate
 * fields (target, method, cached code slot, invoke_impl/method_ptr) directly
 * instead of calling mono_delegate_ctor () at runtime. @virtual_ selects the
 * virtual-dispatch delegate trampoline.
 */
4165 * Returns NULL and set the cfg exception on error.
4167 static G_GNUC_UNUSED MonoInst*
4168 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
4172 gpointer trampoline;
4173 MonoInst *obj, *method_ins, *tramp_ins;
/* Bail out (fall back to the runtime ctor — elided) when no virtual invoke
 * trampoline exists for this signature. */
4177 if (virtual_ && !cfg->llvm_only) {
4178 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4181 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4185 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
4189 /* Inline the contents of mono_delegate_ctor */
4191 /* Set target field */
4192 /* Optimize away setting of NULL target */
4193 if (!MONO_INS_IS_PCONST_NULL (target)) {
4194 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a reference into a heap object requires a write barrier. */
4195 if (cfg->gen_write_barriers) {
4196 dreg = alloc_preg (cfg);
4197 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4198 mini_emit_write_barrier (cfg, ptr, target);
4202 /* Set method field */
4203 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4204 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4207 * To avoid looking up the compiled code belonging to the target method
4208 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4209 * store it, and we fill it after the method has been compiled.
4211 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4212 MonoInst *code_slot_ins;
4215 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Non-shared path: allocate (or reuse) the per-domain code slot under the
 * domain lock and reference it directly. */
4217 domain = mono_domain_get ();
4218 mono_domain_lock (domain);
4219 if (!domain_jit_info (domain)->method_code_hash)
4220 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4221 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4223 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
4224 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4226 mono_domain_unlock (domain);
4228 code_slot_ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4230 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode initializes the delegate via icalls instead of trampolines. */
4233 if (cfg->llvm_only) {
4234 MonoInst *args [16];
4239 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4240 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
4243 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
4249 if (cfg->compile_aot) {
4250 MonoDelegateClassMethodPair *del_tramp;
/* AOT: the (klass, method) pair becomes a patchable constant; method is
 * NULL for shared code since it is only known at runtime. */
4252 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4253 del_tramp->klass = klass;
4254 del_tramp->method = context_used ? NULL : method;
4255 del_tramp->is_virtual = virtual_;
4256 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4259 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4261 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4262 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4265 /* Set invoke_impl field */
4267 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Alternate path (elided condition): tramp_ins points at a
 * MonoDelegateTrampInfo, so load invoke_impl/method_ptr out of it. */
4269 dreg = alloc_preg (cfg);
4270 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4271 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4273 dreg = alloc_preg (cfg);
4274 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4275 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate (byte-sized field). */
4278 dreg = alloc_preg (cfg);
4279 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
4280 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
4282 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank @rank with the
 * dimension arguments in @sp, by calling the vararg mono_array_new_va icall
 * through its registered wrapper.
 */
4288 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4290 MonoJitICallInfo *info;
4292 /* Need to register the icall so it gets an icall wrapper */
4293 info = mono_get_array_new_va_icall (rank);
4295 cfg->flags |= MONO_CFG_HAS_VARARGS;
4297 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile vararg calls, so force this method to the mini JIT. */
4298 cfg->exception_message = g_strdup ("array-new");
4299 cfg->disable_llvm = TRUE;
4301 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4302 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * handle_constrained_gsharedvt_call:
 *
 *   Emit a constrained call whose receiver is a gsharedvt type. Because the
 * receiver may be instantiated as either a reference or a value type, the
 * call cannot be emitted directly; instead the arguments are packed and the
 * dispatch is delegated to the mono_gsharedvt_constrained_call icall. Only a
 * restricted set of method/signature shapes is supported; anything else
 * raises GSHAREDVT_FAILURE.
 */
4306 * handle_constrained_gsharedvt_call:
4308 * Handle constrained calls where the receiver is a gsharedvt type.
4309 * Return the instruction representing the call. Set the cfg exception on failure.
4312 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4313 gboolean *ref_emit_widen)
4315 MonoInst *ins = NULL;
4316 gboolean emit_widen = *ref_emit_widen;
4319 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
4320 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4321 * pack the arguments into an array, and do the rest of the work in in an icall.
4323 if (((cmethod->klass == mono_defaults.object_class) || mono_class_is_interface (cmethod->klass) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4324 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret)) || mini_is_gsharedvt_type (fsig->ret)) &&
4325 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
4326 MonoInst *args [16];
4329 * This case handles calls to
4330 * - object:ToString()/Equals()/GetHashCode(),
4331 * - System.IComparable<T>:CompareTo()
4332 * - System.IEquatable<T>:Equals ()
4333 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args: [0]=receiver (elided), [1]=method, [2]=constrained class,
 * [3]=deref-arg flag, [4]=packed argument array. */
4337 if (mono_method_check_context_used (cmethod))
4338 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4340 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4341 args [2] = mini_emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4343 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4344 if (fsig->hasthis && fsig->param_count) {
4345 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4346 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4347 ins->dreg = alloc_preg (cfg);
4348 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4349 MONO_ADD_INS (cfg->cbb, ins);
4352 if (mini_is_gsharedvt_type (fsig->params [0])) {
4353 int addr_reg, deref_arg_reg;
/* The icall must know whether to dereference the argument slot, which
 * depends on the runtime box type of the parameter's instantiation. */
4355 ins = mini_emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4356 deref_arg_reg = alloc_preg (cfg);
4357 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
4358 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
4360 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4361 addr_reg = ins->dreg;
4362 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4364 EMIT_NEW_ICONST (cfg, args [3], 0);
4365 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* Zero-argument call: no packed array needed. */
4368 EMIT_NEW_ICONST (cfg, args [3], 0);
4369 EMIT_NEW_ICONST (cfg, args [4], 0);
4371 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it to match the declared return. */
4374 if (mini_is_gsharedvt_type (fsig->ret)) {
4375 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
4376 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mono_class_is_enum (mono_class_from_mono_type (fsig->ret))) {
4380 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4381 MONO_ADD_INS (cfg->cbb, add);
4383 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4384 MONO_ADD_INS (cfg->cbb, ins);
4385 /* ins represents the call result */
4388 GSHAREDVT_FAILURE (CEE_CALLVIRT);
4391 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that materializes the GOT address
 * into cfg->got_var, placing it at the very start of the entry basic block,
 * plus a dummy use in the exit block to keep the variable alive for the
 * whole method. No-op if there is no got_var or it was already allocated.
 */
4400 mono_emit_load_got_addr (MonoCompile *cfg)
4402 MonoInst *getaddr, *dummy_use;
4404 if (!cfg->got_var || cfg->got_var_allocated)
4407 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4408 getaddr->cil_code = cfg->header->code;
4409 getaddr->dreg = cfg->got_var->dreg;
4411 /* Add it to the start of the first bblock */
4412 if (cfg->bb_entry->code) {
/* Prepend before any existing instruction so the GOT address is available
 * to everything that follows. */
4413 getaddr->next = cfg->bb_entry->code;
4414 cfg->bb_entry->code = getaddr;
4417 MONO_ADD_INS (cfg->bb_entry, getaddr);
4419 cfg->got_var_allocated = TRUE;
4422 * Add a dummy use to keep the got_var alive, since real uses might
4423 * only be generated by the back ends.
4424 * Add it to end_bblock, so the variable's lifetime covers the whole
4426 * It would be better to make the usage of the got var explicit in all
4427 * cases when the backend needs it (i.e. calls, throw etc.), so this
4428 * wouldn't be needed.
4430 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4431 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit, resolved once from MONO_INLINELIMIT or the compile-time
 * default. Lazy init is unsynchronized — NOTE(review): assumed benign
 * because all racers compute the same value; confirm against callers. */
4434 static int inline_limit;
4435 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether @method may be inlined into the current compilation.
 * Rejects methods that are too large, marked noinline/synchronized,
 * MarshalByRef, too deeply nested, or whose class needs a cctor that
 * cannot be run or proven-run at this point. On soft-float targets,
 * methods with R4 parameters/returns are also rejected.
 */
4438 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4440 MonoMethodHeaderSummary header;
4442 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4443 MonoMethodSignature *sig = mono_method_signature (method);
4447 if (cfg->disable_inline)
/* Cap recursion of the inliner itself. */
4452 if (cfg->inline_depth > 10)
4455 if (!mono_method_get_header_summary (method, &header))
4458 /*runtime, icall and pinvoke are checked by summary call*/
4459 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4460 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4461 (mono_class_is_marshalbyref (method->klass)) ||
4465 /* also consider num_locals? */
4466 /* Do the size check early to avoid creating vtables */
4467 if (!inline_limit_inited) {
4469 if ((inlinelimit = g_getenv ("MONO_INLINELIMIT"))) {
4470 inline_limit = atoi (inlinelimit);
4471 g_free (inlinelimit);
4473 inline_limit = INLINE_LENGTH_LIMIT;
4474 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
4476 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4480 * if we can initialize the class of the method right away, we do,
4481 * otherwise we don't allow inlining if the class needs initialization,
4482 * since it would mean inserting a call to mono_runtime_class_init()
4483 * inside the inlined code
4485 if (cfg->gshared && method->klass->has_cctor && mini_class_check_context_used (cfg, method->klass))
4488 if (!(cfg->opt & MONO_OPT_SHARED)) {
4489 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4490 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4491 if (method->klass->has_cctor) {
4492 vtable = mono_class_vtable (cfg->domain, method->klass);
4495 if (!cfg->compile_aot) {
4497 if (!mono_runtime_class_init_full (vtable, &error)) {
/* Cctor threw: swallow the error here and refuse to inline instead. */
4498 mono_error_cleanup (&error);
4503 } else if (mono_class_is_before_field_init (method->klass)) {
4504 if (cfg->run_cctors && method->klass->has_cctor) {
4505 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4506 if (!method->klass->runtime_info)
4507 /* No vtable created yet */
4509 vtable = mono_class_vtable (cfg->domain, method->klass);
4512 /* This makes so that inline cannot trigger */
4513 /* .cctors: too many apps depend on them */
4514 /* running with a specific order... */
4515 if (! vtable->initialized)
4518 if (!mono_runtime_class_init_full (vtable, &error)) {
4519 mono_error_cleanup (&error);
4523 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4524 if (!method->klass->runtime_info)
4525 /* No vtable created yet */
4527 vtable = mono_class_vtable (cfg->domain, method->klass);
4530 if (!vtable->initialized)
4535 * If we're compiling for shared code
4536 * the cctor will need to be run at aot method load time, for example,
4537 * or at the end of the compilation of the inlining method.
4539 if (mono_class_needs_cctor_run (method->klass, NULL) && !mono_class_is_before_field_init (method->klass))
4543 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4544 if (mono_arch_is_soft_float ()) {
/* R4 values need soft-float decomposition which inlining would bypass. */
4546 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4548 for (i = 0; i < sig->param_count; ++i)
4549 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Caller-supplied blacklist (e.g. to break inline cycles). */
4554 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in @method on @klass requires
 * emitting a class-initialization check. Skips the check when the vtable is
 * already initialized (JIT only), or when the access is provably covered by
 * the ordinary initialization rules.
 */
4561 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4563 if (!cfg->compile_aot) {
/* JIT: initialization already happened, and vtables never de-initialize. */
4565 if (vtable->initialized)
4569 if (mono_class_is_before_field_init (klass)) {
4570 if (cfg->method == method)
4574 if (!mono_class_needs_cctor_run (klass, method))
4577 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4578 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a single-dimensional array element
 * access: &arr->vector [index * element_size], with an optional bounds
 * check (@bcheck). Uses an x86/amd64 LEA fast path for power-of-two sizes
 * and an RGCTX-provided element size for gsharedvt element classes.
 */
4585 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4589 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4592 if (mini_is_gsharedvt_variable_klass (klass)) {
4595 mono_class_init (klass);
4596 size = mono_class_array_element_size (klass);
4599 mult_reg = alloc_preg (cfg);
4600 array_reg = arr->dreg;
4601 index_reg = index->dreg;
4603 #if SIZEOF_REGISTER == 8
4604 /* The array reg is 64 bits but the index reg is only 32 */
4605 if (COMPILE_LLVM (cfg)) {
4607 * abcrem can't handle the OP_SEXT_I4, so add this after abcrem,
4608 * during OP_BOUNDS_CHECK decomposition, and in the implementation
4609 * of OP_X86_LEA for llvm.
4611 index2_reg = index_reg;
4613 index2_reg = alloc_preg (cfg);
4614 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets (elided #else): narrow an I8 index to 32 bits. */
4617 if (index->type == STACK_I8) {
4618 index2_reg = alloc_preg (cfg);
4619 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4621 index2_reg = index_reg;
/* Bounds check against MonoArray.max_length (elided 'if (bcheck)'). */
4626 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4628 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold scale+offset into one LEA for 1/2/4/8-byte elements. */
4629 if (size == 1 || size == 2 || size == 4 || size == 8) {
4630 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4632 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4633 ins->klass = mono_class_get_element_class (klass);
4634 ins->type = STACK_MP;
4640 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt element class: element size is only known at runtime, fetch it
 * from the runtime generic context. */
4643 MonoInst *rgctx_ins;
4646 g_assert (cfg->gshared);
4647 context_used = mini_class_check_context_used (cfg, klass);
4648 g_assert (context_used);
4649 rgctx_ins = mini_emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4650 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4654 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4655 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4656 ins->klass = mono_class_get_element_class (klass);
4657 ins->type = STACK_MP;
4658 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of element [i,j] of a rank-2 array.
 * Each dimension's index is rebased against its lower bound and range
 * checked against its length (unsigned compare, so a negative rebased
 * index also throws IndexOutOfRangeException).  The final address is
 * ((realidx1 * length2) + realidx2) * element_size + arr + vector offset.
 */
4664 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4666 int bounds_reg = alloc_preg (cfg);
4667 int add_reg = alloc_ireg_mp (cfg);
4668 int mult_reg = alloc_preg (cfg);
4669 int mult2_reg = alloc_preg (cfg);
4670 int low1_reg = alloc_preg (cfg);
4671 int low2_reg = alloc_preg (cfg);
4672 int high1_reg = alloc_preg (cfg);
4673 int high2_reg = alloc_preg (cfg);
4674 int realidx1_reg = alloc_preg (cfg);
4675 int realidx2_reg = alloc_preg (cfg);
4676 int sum_reg = alloc_preg (cfg);
4677 int index1, index2, tmpreg;
4681 mono_class_init (klass);
4682 size = mono_class_array_element_size (klass);
4684 index1 = index_ins1->dreg;
4685 index2 = index_ins2->dreg;
4687 #if SIZEOF_REGISTER == 8
4688 /* The array reg is 64 bits but the index reg is only 32 */
4689 if (COMPILE_LLVM (cfg)) {
/* Widen both 32-bit indexes to pointer width. */
4692 tmpreg = alloc_preg (cfg);
4693 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4695 tmpreg = alloc_preg (cfg);
4696 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4700 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4704 /* range checking */
4705 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4706 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: rebase against lower bound, check against length. */
4708 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4709 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4710 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4711 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4712 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4713 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4714 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same, at bounds [1] (offset by sizeof (MonoArrayBounds)). */
4716 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4717 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4718 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4720 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
4721 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4722 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major flatten: (realidx1 * length2 + realidx2) * size. */
4724 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4725 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4727 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4728 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4730 ins->type = STACK_MP;
4732 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Emit the element-address computation for an array accessor method
 * `cmethod` (Get/Set/Address).  `sp` holds the array followed by the
 * indexes; for Set accessors the trailing value argument is excluded
 * from the rank.  Rank 1 uses the inlined fast path; rank 2 can use the
 * 2-D intrinsic (requires real OP_LMUL, so not with emulated mul/div,
 * and not for gsharedvt variable-size elements); everything else calls
 * the marshal-generated Address helper.
 */
4738 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4742 MonoMethod *addr_method;
4744 MonoClass *eclass = cmethod->klass->element_class;
4746 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4749 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
4751 /* emit_ldelema_2 depends on OP_LMUL */
4752 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
4753 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
4756 if (mini_is_gsharedvt_variable_klass (eclass))
/* Generic fallback: call the marshal Address helper for this rank/size. */
4759 element_size = mono_class_array_element_size (eclass);
4760 addr_method = mono_marshal_get_array_address (rank, element_size);
4761 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor break IL opcodes / Debugger.Break. */
4766 static MonoBreakPolicy
4767 always_insert_breakpoint (MonoMethod *method)
4769 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed policy callback; replaced via mono_set_break_policy (). */
4772 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4775 * mono_set_break_policy:
4776 * \param policy_callback the new callback function
4778 * Allow embedders to decide whether to actually obey breakpoint instructions
4779 * (both break IL instructions and \c Debugger.Break method calls), for example
4780 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4781 * untrusted or semi-trusted code.
4783 * \p policy_callback will be called every time a break point instruction needs to
4784 * be inserted with the method argument being the method that calls \c Debugger.Break
4785 * or has the IL \c break instruction. The callback should return \c MONO_BREAK_POLICY_NEVER
4786 * if it wants the breakpoint to not be effective in the given method.
4787 * \c MONO_BREAK_POLICY_ALWAYS is the default.
4790 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4792 if (policy_callback)
4793 break_policy_func = policy_callback;
/* A NULL callback resets to the default always-break policy. */
4795 break_policy_func = always_insert_breakpoint;
/*
 * Decide whether a breakpoint should actually be emitted for `method` by
 * consulting the installed break policy callback.  ON_DBG and invalid
 * policy values are reported with g_warning.
 * NOTE(review): the "brekpoint" spelling is a pre-existing typo; the
 * name is kept because callers elsewhere in the file reference it.
 */
4799 should_insert_brekpoint (MonoMethod *method) {
4800 switch (break_policy_func (method)) {
4801 case MONO_BREAK_POLICY_ALWAYS:
4803 case MONO_BREAK_POLICY_NEVER:
4805 case MONO_BREAK_POLICY_ON_DBG:
4806 g_warning ("mdb no longer supported");
4809 g_warning ("Incorrect value returned from break policy callback");
4814 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * Inline Array.Get/SetGenericValueImpl: compute the element address of
 * args [0][args [1]] (no bounds check — callers already did it) and copy
 * between the element and the memory at args [2].  `is_set` selects the
 * direction; stores of reference types also emit a write barrier.
 */
4816 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4818 MonoInst *addr, *store, *load;
4819 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4821 /* the bounds check is already done by the callers */
4822 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: *element = *args [2] (+ write barrier for reference elements) */
4824 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4825 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4826 if (mini_type_is_reference (&eklass->byval_arg))
4827 mini_emit_write_barrier (cfg, addr, load);
/* get: *args [2] = *element */
4829 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4830 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* TRUE if `klass` is a reference type (drives stelemref / write-barrier decisions). */
4837 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4839 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for storing sp [2] into array sp [0] at index sp [1].
 * With safety checks, reference-type stores of a possibly non-null value
 * go through the virtual stelemref helper (which performs the array
 * covariance check).  Otherwise: gsharedvt elements use OP_STOREV_MEMBASE
 * through a computed address; a constant index uses a direct membase
 * store with a folded offset; the general case computes the address and
 * stores through it, adding a write barrier for reference elements.
 */
4843 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4845 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4846 !(MONO_INS_IS_PCONST_NULL (sp [2]))) {
4847 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4848 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4849 MonoInst *iargs [3];
/* Ensure the vtable slot for the stelemref helper exists. */
4852 mono_class_setup_vtable (obj_array);
4853 g_assert (helper->slot);
4855 if (sp [0]->type != STACK_OBJ)
4857 if (sp [2]->type != STACK_OBJ)
4864 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4868 if (mini_is_gsharedvt_variable_klass (klass)) {
4871 // FIXME-VT: OP_ICONST optimization
4872 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4873 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4874 ins->opcode = OP_STOREV_MEMBASE;
4875 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold element offset into the store. */
4876 int array_reg = sp [0]->dreg;
4877 int index_reg = sp [1]->dreg;
4878 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
4880 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
4881 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
4884 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4885 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4887 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4888 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4889 if (generic_class_is_reference_type (cfg, klass))
4890 mini_emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Inline Array.UnsafeLoad/UnsafeStore: an element access with no safety
 * checks.  For stores the element type comes from the value parameter
 * (fsig->params [2]) and emit_array_store is used with safety_checks ==
 * FALSE; for loads it comes from the return type and an unchecked
 * address + load is emitted.
 */
4897 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4902 eklass = mono_class_from_mono_type (fsig->params [2]);
4904 eklass = mono_class_from_mono_type (fsig->ret);
4907 return emit_array_store (cfg, eklass, args, FALSE);
4909 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4910 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 * Decide whether Array.UnsafeMov<S,R> (param_klass -> return_klass) can
 * be implemented as a plain register move.  Compatible when both are
 * value types with no managed references, neither is R4/R8, and they
 * either have identical value sizes or are non-struct scalars that both
 * fit in the same (<= 4 byte) register class.
 * FIX(review): '&param_klass' had been mojibake-corrupted to '¶m_klass'
 * (UTF-8 "&para;" collapse) on four lines below; restored.
 */
4916 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
4919 int param_size, return_size;
/* Normalize enums etc. to their underlying types before comparing. */
4921 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
4922 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
4924 if (cfg->verbose_level > 3)
4925 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
4927 //Don't allow mixing reference types with value types
4928 if (param_klass->valuetype != return_klass->valuetype) {
4929 if (cfg->verbose_level > 3)
4930 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
4934 if (!param_klass->valuetype) {
4935 if (cfg->verbose_level > 3)
4936 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* GC references inside either type rule out a raw bit move. */
4941 if (param_klass->has_references || return_klass->has_references)
4944 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
4945 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
4946 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
4947 if (cfg->verbose_level > 3)
4948 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floats live in different registers; a plain move cannot express them. */
4952 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
4953 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
4954 if (cfg->verbose_level > 3)
4955 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
4959 param_size = mono_class_value_size (param_klass, &align);
4960 return_size = mono_class_value_size (return_klass, &align);
4962 //We can do it if sizes match
4963 if (param_size == return_size) {
4964 if (cfg->verbose_level > 3)
4965 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
4969 //No simple way to handle struct if sizes don't match
4970 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
4971 if (cfg->verbose_level > 3)
4972 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
4977 * Same reg size category.
4978 * A quick note on why we don't require widening here.
4979 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
4981 * Since the source value comes from a function argument, the JIT will already have
4982 * the value in a VREG and performed any widening needed before (say, when loading from a field).
4984 if (param_size <= 4 && return_size <= 4) {
4985 if (cfg->verbose_level > 3)
4986 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 * Inline Array.UnsafeMov<S,R>: a no-op reinterpreting move, allowed only
 * when is_unsafe_mov_compatible says the two types are bit-compatible
 * (directly, or as element types of rank-1 arrays).  Not applicable for
 * gsharedvt variable return types.
 */
4994 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
4996 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
4997 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
4999 if (mini_is_gsharedvt_variable_type (fsig->ret))
5002 //Valuetypes that are semantically equivalent or numbers that can be widened to
5003 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5006 //Arrays of valuetypes that are semantically equivalent
5007 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 * Try to replace a constructor call with intrinsic IR: SIMD intrinsics
 * first (only when built with MONO_ARCH_SIMD_INTRINSICS and MONO_OPT_SIMD
 * is enabled), then native-types intrinsics as the fallback.
 */
5014 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5016 #ifdef MONO_ARCH_SIMD_INTRINSICS
5017 MonoInst *ins = NULL;
5019 if (cfg->opt & MONO_OPT_SIMD) {
5020 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5026 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * mini_emit_memory_barrier:
 * Append an OP_MEMORY_BARRIER of the given kind (MONO_MEMORY_BARRIER_*)
 * to the current basic block.
 */
5030 mini_emit_memory_barrier (MonoCompile *cfg, int kind)
5032 MonoInst *ins = NULL;
5033 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5034 MONO_ADD_INS (cfg->cbb, ins);
5035 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 * Intrinsics emitted only for the LLVM backend.  System.Math Sin/Cos/
 * Sqrt (and double Abs) become single unary float opcodes; with
 * MONO_OPT_CMOV, integer Min/Max become the corresponding IMIN/IMAX/
 * LMIN/LMAX (and _UN) opcodes.  Returns NULL when nothing matches.
 */
5041 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5043 MonoInst *ins = NULL;
5046 /* The LLVM backend supports these intrinsics */
5047 if (cmethod->klass == mono_defaults.math_class) {
5048 if (strcmp (cmethod->name, "Sin") == 0) {
5050 } else if (strcmp (cmethod->name, "Cos") == 0) {
5052 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5054 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* All of the above are unary double -> double. */
5058 if (opcode && fsig->param_count == 1) {
5059 MONO_INST_NEW (cfg, ins, opcode);
5060 ins->type = STACK_R8;
5061 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5062 ins->sreg1 = args [0]->dreg;
5063 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map onto conditional-move opcodes, per operand width/signedness. */
5067 if (cfg->opt & MONO_OPT_CMOV) {
5068 if (strcmp (cmethod->name, "Min") == 0) {
5069 if (fsig->params [0]->type == MONO_TYPE_I4)
5071 if (fsig->params [0]->type == MONO_TYPE_U4)
5072 opcode = OP_IMIN_UN;
5073 else if (fsig->params [0]->type == MONO_TYPE_I8)
5075 else if (fsig->params [0]->type == MONO_TYPE_U8)
5076 opcode = OP_LMIN_UN;
5077 } else if (strcmp (cmethod->name, "Max") == 0) {
5078 if (fsig->params [0]->type == MONO_TYPE_I4)
5080 if (fsig->params [0]->type == MONO_TYPE_U4)
5081 opcode = OP_IMAX_UN;
5082 else if (fsig->params [0]->type == MONO_TYPE_I8)
5084 else if (fsig->params [0]->type == MONO_TYPE_U8)
5085 opcode = OP_LMAX_UN;
5089 if (opcode && fsig->param_count == 2) {
5090 MONO_INST_NEW (cfg, ins, opcode);
5091 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5092 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5093 ins->sreg1 = args [0]->dreg;
5094 ins->sreg2 = args [1]->dreg;
5095 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 * Intrinsics whose emitted IR is identical across generic instantiations
 * and can thus be shared: the Array.UnsafeStore/UnsafeLoad/UnsafeMov
 * icalls.
 */
5103 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5105 if (cmethod->klass == mono_defaults.array_class) {
5106 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5107 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5108 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5109 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5110 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5111 return emit_array_unsafe_mov (cfg, fsig, args);
5118 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5120 MonoInst *ins = NULL;
5121 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5123 if (cmethod->klass == mono_defaults.string_class) {
5124 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5125 int dreg = alloc_ireg (cfg);
5126 int index_reg = alloc_preg (cfg);
5127 int add_reg = alloc_preg (cfg);
5129 #if SIZEOF_REGISTER == 8
5130 if (COMPILE_LLVM (cfg)) {
5131 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5133 /* The array reg is 64 bits but the index reg is only 32 */
5134 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5137 index_reg = args [1]->dreg;
5139 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5141 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5142 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5143 add_reg = ins->dreg;
5144 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5147 int mult_reg = alloc_preg (cfg);
5148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5149 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5150 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5151 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5153 type_from_op (cfg, ins, NULL, NULL);
5155 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5156 int dreg = alloc_ireg (cfg);
5157 /* Decompose later to allow more optimizations */
5158 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5159 ins->type = STACK_I4;
5160 ins->flags |= MONO_INST_FAULT;
5161 cfg->cbb->has_array_access = TRUE;
5162 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5167 } else if (cmethod->klass == mono_defaults.object_class) {
5168 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5169 int dreg = alloc_ireg_ref (cfg);
5170 int vt_reg = alloc_preg (cfg);
5171 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5172 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5173 type_from_op (cfg, ins, NULL, NULL);
5176 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5177 int dreg = alloc_ireg (cfg);
5178 int t1 = alloc_ireg (cfg);
5180 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5181 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5182 ins->type = STACK_I4;
5185 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5186 MONO_INST_NEW (cfg, ins, OP_NOP);
5187 MONO_ADD_INS (cfg->cbb, ins);
5191 } else if (cmethod->klass == mono_defaults.array_class) {
5192 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5193 return emit_array_generic_access (cfg, fsig, args, FALSE);
5194 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5195 return emit_array_generic_access (cfg, fsig, args, TRUE);
5197 #ifndef MONO_BIG_ARRAYS
5199 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5202 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5203 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5204 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5205 int dreg = alloc_ireg (cfg);
5206 int bounds_reg = alloc_ireg_mp (cfg);
5207 MonoBasicBlock *end_bb, *szarray_bb;
5208 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5210 NEW_BBLOCK (cfg, end_bb);
5211 NEW_BBLOCK (cfg, szarray_bb);
5213 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5214 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5215 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5216 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5217 /* Non-szarray case */
5219 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5220 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5222 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5223 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5224 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5225 MONO_START_BB (cfg, szarray_bb);
5228 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5229 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5231 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5232 MONO_START_BB (cfg, end_bb);
5234 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5235 ins->type = STACK_I4;
5241 if (cmethod->name [0] != 'g')
5244 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5245 int dreg = alloc_ireg (cfg);
5246 int vtable_reg = alloc_preg (cfg);
5247 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5248 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5249 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5250 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5251 type_from_op (cfg, ins, NULL, NULL);
5254 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5255 int dreg = alloc_ireg (cfg);
5257 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5258 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5259 type_from_op (cfg, ins, NULL, NULL);
5264 } else if (cmethod->klass == runtime_helpers_class) {
5265 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5266 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5268 } else if (strcmp (cmethod->name, "IsReferenceOrContainsReferences") == 0 && fsig->param_count == 0) {
5269 MonoGenericContext *ctx = mono_method_get_context (cmethod);
5271 g_assert (ctx->method_inst);
5272 g_assert (ctx->method_inst->type_argc == 1);
5273 MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
5274 MonoClass *klass = mono_class_from_mono_type (t);
5278 mono_class_init (klass);
5279 if (MONO_TYPE_IS_REFERENCE (t))
5280 EMIT_NEW_ICONST (cfg, ins, 1);
5281 else if (MONO_TYPE_IS_PRIMITIVE (t))
5282 EMIT_NEW_ICONST (cfg, ins, 0);
5283 else if (cfg->gshared && (t->type == MONO_TYPE_VAR || t->type == MONO_TYPE_MVAR) && !mini_type_var_is_vt (t))
5284 EMIT_NEW_ICONST (cfg, ins, 1);
5285 else if (!cfg->gshared || !mini_class_check_context_used (cfg, klass))
5286 EMIT_NEW_ICONST (cfg, ins, klass->has_references ? 1 : 0);
5288 g_assert (cfg->gshared);
5290 int context_used = mini_class_check_context_used (cfg, klass);
5292 /* This returns 1 or 2 */
5293 MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
5294 int dreg = alloc_ireg (cfg);
5295 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
5301 } else if (cmethod->klass == mono_defaults.monitor_class) {
5302 gboolean is_enter = FALSE;
5303 gboolean is_v4 = FALSE;
5305 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
5309 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
5314 * To make async stack traces work, icalls which can block should have a wrapper.
5315 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
5317 MonoBasicBlock *end_bb;
5319 NEW_BBLOCK (cfg, end_bb);
5321 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
5322 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
5323 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
5324 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_internal : (gpointer)mono_monitor_enter_internal, args);
5325 MONO_START_BB (cfg, end_bb);
5328 } else if (cmethod->klass == mono_defaults.thread_class) {
5329 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5330 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5331 MONO_ADD_INS (cfg->cbb, ins);
5333 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5334 return mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5335 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5337 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5339 if (fsig->params [0]->type == MONO_TYPE_I1)
5340 opcode = OP_LOADI1_MEMBASE;
5341 else if (fsig->params [0]->type == MONO_TYPE_U1)
5342 opcode = OP_LOADU1_MEMBASE;
5343 else if (fsig->params [0]->type == MONO_TYPE_I2)
5344 opcode = OP_LOADI2_MEMBASE;
5345 else if (fsig->params [0]->type == MONO_TYPE_U2)
5346 opcode = OP_LOADU2_MEMBASE;
5347 else if (fsig->params [0]->type == MONO_TYPE_I4)
5348 opcode = OP_LOADI4_MEMBASE;
5349 else if (fsig->params [0]->type == MONO_TYPE_U4)
5350 opcode = OP_LOADU4_MEMBASE;
5351 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5352 opcode = OP_LOADI8_MEMBASE;
5353 else if (fsig->params [0]->type == MONO_TYPE_R4)
5354 opcode = OP_LOADR4_MEMBASE;
5355 else if (fsig->params [0]->type == MONO_TYPE_R8)
5356 opcode = OP_LOADR8_MEMBASE;
5357 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5358 opcode = OP_LOAD_MEMBASE;
5361 MONO_INST_NEW (cfg, ins, opcode);
5362 ins->inst_basereg = args [0]->dreg;
5363 ins->inst_offset = 0;
5364 MONO_ADD_INS (cfg->cbb, ins);
5366 switch (fsig->params [0]->type) {
5373 ins->dreg = mono_alloc_ireg (cfg);
5374 ins->type = STACK_I4;
5378 ins->dreg = mono_alloc_lreg (cfg);
5379 ins->type = STACK_I8;
5383 ins->dreg = mono_alloc_ireg (cfg);
5384 #if SIZEOF_REGISTER == 8
5385 ins->type = STACK_I8;
5387 ins->type = STACK_I4;
5392 ins->dreg = mono_alloc_freg (cfg);
5393 ins->type = STACK_R8;
5396 g_assert (mini_type_is_reference (fsig->params [0]));
5397 ins->dreg = mono_alloc_ireg_ref (cfg);
5398 ins->type = STACK_OBJ;
5402 if (opcode == OP_LOADI8_MEMBASE)
5403 ins = mono_decompose_opcode (cfg, ins);
5405 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5409 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5411 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5413 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5414 opcode = OP_STOREI1_MEMBASE_REG;
5415 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5416 opcode = OP_STOREI2_MEMBASE_REG;
5417 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5418 opcode = OP_STOREI4_MEMBASE_REG;
5419 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5420 opcode = OP_STOREI8_MEMBASE_REG;
5421 else if (fsig->params [0]->type == MONO_TYPE_R4)
5422 opcode = OP_STORER4_MEMBASE_REG;
5423 else if (fsig->params [0]->type == MONO_TYPE_R8)
5424 opcode = OP_STORER8_MEMBASE_REG;
5425 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5426 opcode = OP_STORE_MEMBASE_REG;
5429 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5431 MONO_INST_NEW (cfg, ins, opcode);
5432 ins->sreg1 = args [1]->dreg;
5433 ins->inst_destbasereg = args [0]->dreg;
5434 ins->inst_offset = 0;
5435 MONO_ADD_INS (cfg->cbb, ins);
5437 if (opcode == OP_STOREI8_MEMBASE_REG)
5438 ins = mono_decompose_opcode (cfg, ins);
5443 } else if (cmethod->klass->image == mono_defaults.corlib &&
5444 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5445 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5448 #if SIZEOF_REGISTER == 8
5449 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5450 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5451 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5452 ins->dreg = mono_alloc_preg (cfg);
5453 ins->sreg1 = args [0]->dreg;
5454 ins->type = STACK_I8;
5455 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5456 MONO_ADD_INS (cfg->cbb, ins);
5460 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5462 /* 64 bit reads are already atomic */
5463 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5464 load_ins->dreg = mono_alloc_preg (cfg);
5465 load_ins->inst_basereg = args [0]->dreg;
5466 load_ins->inst_offset = 0;
5467 load_ins->type = STACK_I8;
5468 MONO_ADD_INS (cfg->cbb, load_ins);
5470 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5477 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
5478 MonoInst *ins_iconst;
5481 if (fsig->params [0]->type == MONO_TYPE_I4) {
5482 opcode = OP_ATOMIC_ADD_I4;
5483 cfg->has_atomic_add_i4 = TRUE;
5485 #if SIZEOF_REGISTER == 8
5486 else if (fsig->params [0]->type == MONO_TYPE_I8)
5487 opcode = OP_ATOMIC_ADD_I8;
5490 if (!mono_arch_opcode_supported (opcode))
5492 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5493 ins_iconst->inst_c0 = 1;
5494 ins_iconst->dreg = mono_alloc_ireg (cfg);
5495 MONO_ADD_INS (cfg->cbb, ins_iconst);
5497 MONO_INST_NEW (cfg, ins, opcode);
5498 ins->dreg = mono_alloc_ireg (cfg);
5499 ins->inst_basereg = args [0]->dreg;
5500 ins->inst_offset = 0;
5501 ins->sreg2 = ins_iconst->dreg;
5502 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5503 MONO_ADD_INS (cfg->cbb, ins);
5505 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
5506 MonoInst *ins_iconst;
5509 if (fsig->params [0]->type == MONO_TYPE_I4) {
5510 opcode = OP_ATOMIC_ADD_I4;
5511 cfg->has_atomic_add_i4 = TRUE;
5513 #if SIZEOF_REGISTER == 8
5514 else if (fsig->params [0]->type == MONO_TYPE_I8)
5515 opcode = OP_ATOMIC_ADD_I8;
5518 if (!mono_arch_opcode_supported (opcode))
5520 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5521 ins_iconst->inst_c0 = -1;
5522 ins_iconst->dreg = mono_alloc_ireg (cfg);
5523 MONO_ADD_INS (cfg->cbb, ins_iconst);
5525 MONO_INST_NEW (cfg, ins, opcode);
5526 ins->dreg = mono_alloc_ireg (cfg);
5527 ins->inst_basereg = args [0]->dreg;
5528 ins->inst_offset = 0;
5529 ins->sreg2 = ins_iconst->dreg;
5530 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5531 MONO_ADD_INS (cfg->cbb, ins);
5533 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
5536 if (fsig->params [0]->type == MONO_TYPE_I4) {
5537 opcode = OP_ATOMIC_ADD_I4;
5538 cfg->has_atomic_add_i4 = TRUE;
5540 #if SIZEOF_REGISTER == 8
5541 else if (fsig->params [0]->type == MONO_TYPE_I8)
5542 opcode = OP_ATOMIC_ADD_I8;
5545 if (!mono_arch_opcode_supported (opcode))
5547 MONO_INST_NEW (cfg, ins, opcode);
5548 ins->dreg = mono_alloc_ireg (cfg);
5549 ins->inst_basereg = args [0]->dreg;
5550 ins->inst_offset = 0;
5551 ins->sreg2 = args [1]->dreg;
5552 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5553 MONO_ADD_INS (cfg->cbb, ins);
5556 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
5557 MonoInst *f2i = NULL, *i2f;
5558 guint32 opcode, f2i_opcode, i2f_opcode;
5559 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5560 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
5562 if (fsig->params [0]->type == MONO_TYPE_I4 ||
5563 fsig->params [0]->type == MONO_TYPE_R4) {
5564 opcode = OP_ATOMIC_EXCHANGE_I4;
5565 f2i_opcode = OP_MOVE_F_TO_I4;
5566 i2f_opcode = OP_MOVE_I4_TO_F;
5567 cfg->has_atomic_exchange_i4 = TRUE;
5569 #if SIZEOF_REGISTER == 8
5571 fsig->params [0]->type == MONO_TYPE_I8 ||
5572 fsig->params [0]->type == MONO_TYPE_R8 ||
5573 fsig->params [0]->type == MONO_TYPE_I) {
5574 opcode = OP_ATOMIC_EXCHANGE_I8;
5575 f2i_opcode = OP_MOVE_F_TO_I8;
5576 i2f_opcode = OP_MOVE_I8_TO_F;
5579 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
5580 opcode = OP_ATOMIC_EXCHANGE_I4;
5581 cfg->has_atomic_exchange_i4 = TRUE;
5587 if (!mono_arch_opcode_supported (opcode))
5591 /* TODO: Decompose these opcodes instead of bailing here. */
5592 if (COMPILE_SOFT_FLOAT (cfg))
5595 MONO_INST_NEW (cfg, f2i, f2i_opcode);
5596 f2i->dreg = mono_alloc_ireg (cfg);
5597 f2i->sreg1 = args [1]->dreg;
5598 if (f2i_opcode == OP_MOVE_F_TO_I4)
5599 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5600 MONO_ADD_INS (cfg->cbb, f2i);
5603 MONO_INST_NEW (cfg, ins, opcode);
5604 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5605 ins->inst_basereg = args [0]->dreg;
5606 ins->inst_offset = 0;
5607 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
5608 MONO_ADD_INS (cfg->cbb, ins);
5610 switch (fsig->params [0]->type) {
5612 ins->type = STACK_I4;
5615 ins->type = STACK_I8;
5618 #if SIZEOF_REGISTER == 8
5619 ins->type = STACK_I8;
5621 ins->type = STACK_I4;
5626 ins->type = STACK_R8;
5629 g_assert (mini_type_is_reference (fsig->params [0]));
5630 ins->type = STACK_OBJ;
5635 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5636 i2f->dreg = mono_alloc_freg (cfg);
5637 i2f->sreg1 = ins->dreg;
5638 i2f->type = STACK_R8;
5639 if (i2f_opcode == OP_MOVE_I4_TO_F)
5640 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5641 MONO_ADD_INS (cfg->cbb, i2f);
5646 if (cfg->gen_write_barriers && is_ref)
5647 mini_emit_write_barrier (cfg, args [0], args [1]);
5649 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
5650 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
5651 guint32 opcode, f2i_opcode, i2f_opcode;
5652 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
5653 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
5655 if (fsig->params [1]->type == MONO_TYPE_I4 ||
5656 fsig->params [1]->type == MONO_TYPE_R4) {
5657 opcode = OP_ATOMIC_CAS_I4;
5658 f2i_opcode = OP_MOVE_F_TO_I4;
5659 i2f_opcode = OP_MOVE_I4_TO_F;
5660 cfg->has_atomic_cas_i4 = TRUE;
5662 #if SIZEOF_REGISTER == 8
5664 fsig->params [1]->type == MONO_TYPE_I8 ||
5665 fsig->params [1]->type == MONO_TYPE_R8 ||
5666 fsig->params [1]->type == MONO_TYPE_I) {
5667 opcode = OP_ATOMIC_CAS_I8;
5668 f2i_opcode = OP_MOVE_F_TO_I8;
5669 i2f_opcode = OP_MOVE_I8_TO_F;
5672 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
5673 opcode = OP_ATOMIC_CAS_I4;
5674 cfg->has_atomic_cas_i4 = TRUE;
5680 if (!mono_arch_opcode_supported (opcode))
5684 /* TODO: Decompose these opcodes instead of bailing here. */
5685 if (COMPILE_SOFT_FLOAT (cfg))
5688 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
5689 f2i_new->dreg = mono_alloc_ireg (cfg);
5690 f2i_new->sreg1 = args [1]->dreg;
5691 if (f2i_opcode == OP_MOVE_F_TO_I4)
5692 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5693 MONO_ADD_INS (cfg->cbb, f2i_new);
5695 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
5696 f2i_cmp->dreg = mono_alloc_ireg (cfg);
5697 f2i_cmp->sreg1 = args [2]->dreg;
5698 if (f2i_opcode == OP_MOVE_F_TO_I4)
5699 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5700 MONO_ADD_INS (cfg->cbb, f2i_cmp);
5703 MONO_INST_NEW (cfg, ins, opcode);
5704 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5705 ins->sreg1 = args [0]->dreg;
5706 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
5707 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
5708 MONO_ADD_INS (cfg->cbb, ins);
5710 switch (fsig->params [1]->type) {
5712 ins->type = STACK_I4;
5715 ins->type = STACK_I8;
5718 #if SIZEOF_REGISTER == 8
5719 ins->type = STACK_I8;
5721 ins->type = STACK_I4;
5725 ins->type = cfg->r4_stack_type;
5728 ins->type = STACK_R8;
5731 g_assert (mini_type_is_reference (fsig->params [1]));
5732 ins->type = STACK_OBJ;
5737 MONO_INST_NEW (cfg, i2f, i2f_opcode);
5738 i2f->dreg = mono_alloc_freg (cfg);
5739 i2f->sreg1 = ins->dreg;
5740 i2f->type = STACK_R8;
5741 if (i2f_opcode == OP_MOVE_I4_TO_F)
5742 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
5743 MONO_ADD_INS (cfg->cbb, i2f);
5748 if (cfg->gen_write_barriers && is_ref)
5749 mini_emit_write_barrier (cfg, args [0], args [1]);
5751 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
5752 fsig->params [1]->type == MONO_TYPE_I4) {
5753 MonoInst *cmp, *ceq;
5755 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5758 /* int32 r = CAS (location, value, comparand); */
5759 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5760 ins->dreg = alloc_ireg (cfg);
5761 ins->sreg1 = args [0]->dreg;
5762 ins->sreg2 = args [1]->dreg;
5763 ins->sreg3 = args [2]->dreg;
5764 ins->type = STACK_I4;
5765 MONO_ADD_INS (cfg->cbb, ins);
5767 /* bool result = r == comparand; */
5768 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
5769 cmp->sreg1 = ins->dreg;
5770 cmp->sreg2 = args [2]->dreg;
5771 cmp->type = STACK_I4;
5772 MONO_ADD_INS (cfg->cbb, cmp);
5774 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
5775 ceq->dreg = alloc_ireg (cfg);
5776 ceq->type = STACK_I4;
5777 MONO_ADD_INS (cfg->cbb, ceq);
5779 /* *success = result; */
5780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
5782 cfg->has_atomic_cas_i4 = TRUE;
5784 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
5785 ins = mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5789 } else if (cmethod->klass->image == mono_defaults.corlib &&
5790 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5791 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5794 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
5796 MonoType *t = fsig->params [0];
5798 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
5800 g_assert (t->byref);
5801 /* t is a byref type, so the reference check is more complicated */
5802 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5803 if (t->type == MONO_TYPE_I1)
5804 opcode = OP_ATOMIC_LOAD_I1;
5805 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5806 opcode = OP_ATOMIC_LOAD_U1;
5807 else if (t->type == MONO_TYPE_I2)
5808 opcode = OP_ATOMIC_LOAD_I2;
5809 else if (t->type == MONO_TYPE_U2)
5810 opcode = OP_ATOMIC_LOAD_U2;
5811 else if (t->type == MONO_TYPE_I4)
5812 opcode = OP_ATOMIC_LOAD_I4;
5813 else if (t->type == MONO_TYPE_U4)
5814 opcode = OP_ATOMIC_LOAD_U4;
5815 else if (t->type == MONO_TYPE_R4)
5816 opcode = OP_ATOMIC_LOAD_R4;
5817 else if (t->type == MONO_TYPE_R8)
5818 opcode = OP_ATOMIC_LOAD_R8;
5819 #if SIZEOF_REGISTER == 8
5820 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5821 opcode = OP_ATOMIC_LOAD_I8;
5822 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5823 opcode = OP_ATOMIC_LOAD_U8;
5825 else if (t->type == MONO_TYPE_I)
5826 opcode = OP_ATOMIC_LOAD_I4;
5827 else if (is_ref || t->type == MONO_TYPE_U)
5828 opcode = OP_ATOMIC_LOAD_U4;
5832 if (!mono_arch_opcode_supported (opcode))
5835 MONO_INST_NEW (cfg, ins, opcode);
5836 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
5837 ins->sreg1 = args [0]->dreg;
5838 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5839 MONO_ADD_INS (cfg->cbb, ins);
5842 case MONO_TYPE_BOOLEAN:
5849 ins->type = STACK_I4;
5853 ins->type = STACK_I8;
5857 #if SIZEOF_REGISTER == 8
5858 ins->type = STACK_I8;
5860 ins->type = STACK_I4;
5864 ins->type = cfg->r4_stack_type;
5867 ins->type = STACK_R8;
5871 ins->type = STACK_OBJ;
5877 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
5879 MonoType *t = fsig->params [0];
5882 g_assert (t->byref);
5883 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
5884 if (t->type == MONO_TYPE_I1)
5885 opcode = OP_ATOMIC_STORE_I1;
5886 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
5887 opcode = OP_ATOMIC_STORE_U1;
5888 else if (t->type == MONO_TYPE_I2)
5889 opcode = OP_ATOMIC_STORE_I2;
5890 else if (t->type == MONO_TYPE_U2)
5891 opcode = OP_ATOMIC_STORE_U2;
5892 else if (t->type == MONO_TYPE_I4)
5893 opcode = OP_ATOMIC_STORE_I4;
5894 else if (t->type == MONO_TYPE_U4)
5895 opcode = OP_ATOMIC_STORE_U4;
5896 else if (t->type == MONO_TYPE_R4)
5897 opcode = OP_ATOMIC_STORE_R4;
5898 else if (t->type == MONO_TYPE_R8)
5899 opcode = OP_ATOMIC_STORE_R8;
5900 #if SIZEOF_REGISTER == 8
5901 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
5902 opcode = OP_ATOMIC_STORE_I8;
5903 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
5904 opcode = OP_ATOMIC_STORE_U8;
5906 else if (t->type == MONO_TYPE_I)
5907 opcode = OP_ATOMIC_STORE_I4;
5908 else if (is_ref || t->type == MONO_TYPE_U)
5909 opcode = OP_ATOMIC_STORE_U4;
5913 if (!mono_arch_opcode_supported (opcode))
5916 MONO_INST_NEW (cfg, ins, opcode);
5917 ins->dreg = args [0]->dreg;
5918 ins->sreg1 = args [1]->dreg;
5919 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
5920 MONO_ADD_INS (cfg->cbb, ins);
5922 if (cfg->gen_write_barriers && is_ref)
5923 mini_emit_write_barrier (cfg, args [0], args [1]);
5929 } else if (cmethod->klass->image == mono_defaults.corlib &&
5930 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
5931 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
5932 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
5933 if (should_insert_brekpoint (cfg->method)) {
5934 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5936 MONO_INST_NEW (cfg, ins, OP_NOP);
5937 MONO_ADD_INS (cfg->cbb, ins);
5941 } else if (cmethod->klass->image == mono_defaults.corlib &&
5942 (strcmp (cmethod->klass->name_space, "System") == 0) &&
5943 (strcmp (cmethod->klass->name, "Environment") == 0)) {
5944 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
5946 EMIT_NEW_ICONST (cfg, ins, 1);
5948 EMIT_NEW_ICONST (cfg, ins, 0);
5951 } else if (cmethod->klass->image == mono_defaults.corlib &&
5952 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5953 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
5954 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
5955 /* No stack walks are currently available, so implement this as an intrinsic */
5956 MonoInst *assembly_ins;
5958 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
5959 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
5962 } else if (cmethod->klass->image == mono_defaults.corlib &&
5963 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
5964 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
5965 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
5966 /* No stack walks are currently available, so implement this as an intrinsic */
5967 MonoInst *method_ins;
5968 MonoMethod *declaring = cfg->method;
5970 /* This returns the declaring generic method */
5971 if (declaring->is_inflated)
5972 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
5973 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
5974 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
5975 cfg->no_inline = TRUE;
5976 if (cfg->method != cfg->current_method)
5977 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
5980 } else if (cmethod->klass == mono_defaults.math_class) {
5982 * There is general branchless code for Min/Max, but it does not work for
5984 * http://everything2.com/?node_id=1051618
5986 } else if (cmethod->klass == mono_defaults.systemtype_class && !strcmp (cmethod->name, "op_Equality")) {
5987 EMIT_NEW_BIALU (cfg, ins, OP_COMPARE, -1, args [0]->dreg, args [1]->dreg);
5988 MONO_INST_NEW (cfg, ins, OP_PCEQ);
5989 ins->dreg = alloc_preg (cfg);
5990 ins->type = STACK_I4;
5991 MONO_ADD_INS (cfg->cbb, ins);
5993 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
5994 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
5995 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
5996 !strcmp (cmethod->klass->name, "Selector")) ||
5997 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
5998 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
5999 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6000 !strcmp (cmethod->klass->name, "Selector"))
6002 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6003 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6004 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6007 MonoJumpInfoToken *ji;
6010 if (args [0]->opcode == OP_GOT_ENTRY) {
6011 pi = (MonoInst *)args [0]->inst_p1;
6012 g_assert (pi->opcode == OP_PATCH_INFO);
6013 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6014 ji = (MonoJumpInfoToken *)pi->inst_p0;
6016 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6017 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6020 NULLIFY_INS (args [0]);
6022 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6023 return_val_if_nok (&cfg->error, NULL);
6025 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6026 ins->dreg = mono_alloc_ireg (cfg);
6029 MONO_ADD_INS (cfg->cbb, ins);
6034 #ifdef MONO_ARCH_SIMD_INTRINSICS
6035 if (cfg->opt & MONO_OPT_SIMD) {
6036 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6042 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6046 if (COMPILE_LLVM (cfg)) {
6047 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6052 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6056 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime-internal calls to an optimized implementation.
 * Currently only handles String.InternalAllocateStr, which is rewritten into a
 * call to the GC's managed string allocator when allocation profiling and
 * shared (AOT-style) code are both disabled.
 * Returns the emitted call instruction, or NULL (elided here) when no
 * redirection applies — TODO confirm the fall-through return against the full file.
 */
6059 inline static MonoInst*
6060 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6061 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6063 if (method->klass == mono_defaults.string_class) {
6064 /* managed string allocation support */
/* Only safe to redirect when the allocation profiler is off and we are not
 * compiling domain-shared code. */
6065 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6066 MonoInst *iargs [2];
6067 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6068 MonoMethod *managed_alloc = NULL;
6070 g_assert (vtable); /* Should not fail since it is System.String */
6071 #ifndef MONO_CROSS_COMPILE
6072 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* The managed allocator takes (vtable, length); forward the original
 * length argument as the second parameter. */
6076 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6077 iargs [1] = args [0];
6078 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Spill the call arguments in SP into fresh OP_LOCAL variables so an inlined
 * method body can refer to them through cfg->args [i]. Handles an implicit
 * 'this' at index 0 when sig->hasthis is set.
 */
6085 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6087 MonoInst *store, *temp;
6090 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' the static signature has no entry, so derive the type from
 * the value on the stack instead. */
6091 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6094 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6095 * would be different than the MonoInst's used to represent arguments, and
6096 * the ldelema implementation can't deal with that.
6097 * Solution: When ldelema is used on an inline argument, create a var for
6098 * it, emit ldelema on that var, and emit the saving code below in
6099 * inline_method () if needed.
6101 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6102 cfg->args [i] = temp;
6103 /* This uses cfg->args [i] which is set by the preceding line */
6104 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
/* Keep the original IL location for debugging/sequence points. */
6105 store->cil_code = sp [0]->cil_code;
/*
 * Debug aids: when non-zero, inlining is additionally filtered by the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT / MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variables (see the check_inline_*_name_limit helpers below).
 */
6110 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6111 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6113 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug filter: returns TRUE only if the full name of CALLED_METHOD starts
 * with the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable. The env var is read once and cached in a static.
 */
6115 check_inline_called_method_name_limit (MonoMethod *called_method)
6118 static const char *limit = NULL;
6120 if (limit == NULL) {
6121 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6123 if (limit_string != NULL)
6124 limit = limit_string;
/* An empty limit means "no filtering" (handled in the elided else arm). */
6129 if (limit [0] != '\0') {
6130 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: equal for strlen (limit) chars means a match. */
6132 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6133 g_free (called_method_name);
6135 //return (strncmp_result <= 0);
6136 return (strncmp_result == 0);
6143 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug filter, mirror of check_inline_called_method_name_limit: returns
 * TRUE only if CALLER_METHOD's full name starts with the prefix in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable (read once,
 * cached in a static).
 */
6145 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6148 static const char *limit = NULL;
6150 if (limit == NULL) {
6151 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6152 if (limit_string != NULL) {
6153 limit = limit_string;
/* An empty limit means "no filtering" (handled in the elided else arm). */
6159 if (limit [0] != '\0') {
6160 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6162 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6163 g_free (caller_method_name);
6165 //return (strncmp_result <= 0);
6166 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes register DREG to the zero value appropriate for
 * RTYPE: NULL for pointers/references, 0 for integers, 0.0 for floats, and a
 * VZERO for value types. Used to give inlined-method return variables a
 * defined value on paths that never set them.
 */
6174 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Statics so OP_R4CONST/OP_R8CONST can point at stable storage. */
6176 static double r8_0 = 0.0;
6177 static float r4_0 = 0.0;
6181 rtype = mini_get_underlying_type (rtype);
6185 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6186 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6187 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6188 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6189 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With r4fp enabled, R4 values stay in single precision on the stack. */
6190 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6191 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6192 ins->type = STACK_R4;
6193 ins->inst_p0 = (void*)&r4_0;
6195 MONO_ADD_INS (cfg->cbb, ins);
6196 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6197 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6198 ins->type = STACK_R8;
6199 ins->inst_p0 = (void*)&r8_0;
6201 MONO_ADD_INS (cfg->cbb, ins);
6202 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6203 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6204 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type variables known to be value types also get a VZERO. */
6205 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6206 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a pointer-sized NULL. */
6208 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder initializations.
 * These keep the IR/SSA form valid without generating real code for
 * uninitialized locals (used when 'init locals' is off). Falls back to a real
 * initialization via emit_init_rvar () for any type not covered below.
 */
6213 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6217 rtype = mini_get_underlying_type (rtype);
6221 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6222 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6223 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6224 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6225 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6226 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6227 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6228 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6229 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6230 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6231 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6232 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6233 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6234 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero-initialization. */
6236 emit_init_rvar (cfg, dreg, rtype);
6240 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE. With INIT (handled in the
 * elided branch selection) a real zero-init is emitted, otherwise a dummy one.
 * Under soft-float a temporary register is initialized first and then stored
 * to the local, since the local itself cannot hold the FP representation
 * directly.
 */
6242 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6244 MonoInst *var = cfg->locals [local];
6245 if (COMPILE_SOFT_FLOAT (cfg)) {
6247 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
6248 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local. */
6249 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6252 emit_init_rvar (cfg, var->dreg, type);
6254 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * mini_inline_method:
 *
 *   Thin public wrapper around the static inline_method () below; see that
 * function for the parameter and return-value contract.
 */
6259 mini_inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, guchar *ip, guint real_offset, gboolean inline_always)
6261 return inline_method (cfg, cmethod, fsig, sp, ip, real_offset, inline_always);
6267 * Return the cost of inlining CMETHOD, or zero if it should not be inlined.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point. The caller's
 * compilation state (locals, args, bblock maps, generic context, ...) is saved
 * into prev_* locals, mono_method_to_ir () is run recursively on CMETHOD
 * between the fresh sbblock/ebblock pair, and the state is restored
 * afterwards. On success the new bblocks are linked/merged into the caller's
 * graph and the inlining cost is returned; on failure the added bblocks are
 * discarded and 0 is returned (per the header comment above).
 * INLINE_ALWAYS forces inlining regardless of cost and turns header-load
 * failures into compile errors instead of silent bail-outs.
 */
6270 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6271 guchar *ip, guint real_offset, gboolean inline_always)
6274 MonoInst *ins, *rvar = NULL;
6275 MonoMethodHeader *cheader;
6276 MonoBasicBlock *ebblock, *sbblock;
6278 MonoMethod *prev_inlined_method;
6279 MonoInst **prev_locals, **prev_args;
6280 MonoType **prev_arg_types;
6281 guint prev_real_offset;
6282 GHashTable *prev_cbb_hash;
6283 MonoBasicBlock **prev_cil_offset_to_bb;
6284 MonoBasicBlock *prev_cbb;
6285 const unsigned char *prev_ip;
6286 unsigned char *prev_cil_start;
6287 guint32 prev_cil_offset_to_bb_len;
6288 MonoMethod *prev_current_method;
6289 MonoGenericContext *prev_generic_context;
6290 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
6292 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based name filters (debug aids, see the helpers above). */
6294 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6295 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6298 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6299 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6304 fsig = mono_method_signature (cmethod);
6306 if (cfg->verbose_level > 2)
6307 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each method as inlineable only once. */
6309 if (!cmethod->inline_info) {
6310 cfg->stat_inlineable_methods++;
6311 cmethod->inline_info = 1;
6314 /* allocate local variables */
6315 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failed: fatal when inlining is mandatory, otherwise just
 * drop the error and give up on inlining. */
6317 if (inline_always) {
6318 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
6319 mono_error_move (&cfg->error, &error);
6321 mono_error_cleanup (&error);
6326 /*Must verify before creating locals as it can cause the JIT to assert.*/
6327 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6328 mono_metadata_free_mh (cheader);
6332 /* allocate space to store the return value */
6333 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6334 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* --- Save caller state and install the inlinee's locals. --- */
6337 prev_locals = cfg->locals;
6338 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6339 for (i = 0; i < cheader->num_locals; ++i)
6340 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6342 /* allocate start and end blocks */
6343 /* This is needed so if the inline is aborted, we can clean up */
6344 NEW_BBLOCK (cfg, sbblock);
6345 sbblock->real_offset = real_offset;
6347 NEW_BBLOCK (cfg, ebblock);
6348 ebblock->block_num = cfg->num_bblocks++;
6349 ebblock->real_offset = real_offset;
6351 prev_args = cfg->args;
6352 prev_arg_types = cfg->arg_types;
6353 prev_inlined_method = cfg->inlined_method;
6354 cfg->inlined_method = cmethod;
6355 cfg->ret_var_set = FALSE;
6356 cfg->inline_depth ++;
6357 prev_real_offset = cfg->real_offset;
6358 prev_cbb_hash = cfg->cbb_hash;
6359 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6360 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6361 prev_cil_start = cfg->cil_start;
6363 prev_cbb = cfg->cbb;
6364 prev_current_method = cfg->current_method;
6365 prev_generic_context = cfg->generic_context;
6366 prev_ret_var_set = cfg->ret_var_set;
6367 prev_disable_inline = cfg->disable_inline;
/* Inlining a callvirt on an instance method keeps virtual semantics. */
6369 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* --- Recursively convert the inlinee's IL to IR. --- */
6372 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
6374 ret_var_set = cfg->ret_var_set;
/* --- Restore the caller's compilation state. --- */
6376 cfg->inlined_method = prev_inlined_method;
6377 cfg->real_offset = prev_real_offset;
6378 cfg->cbb_hash = prev_cbb_hash;
6379 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6380 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6381 cfg->cil_start = prev_cil_start;
6383 cfg->locals = prev_locals;
6384 cfg->args = prev_args;
6385 cfg->arg_types = prev_arg_types;
6386 cfg->current_method = prev_current_method;
6387 cfg->generic_context = prev_generic_context;
6388 cfg->ret_var_set = prev_ret_var_set;
6389 cfg->disable_inline = prev_disable_inline;
6390 cfg->inline_depth --;
/* Accept the inline when it is cheap enough, forced, or marked
 * AggressiveInlining (negative costs indicate failure). */
6392 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
6393 if (cfg->verbose_level > 2)
6394 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6396 cfg->stat_inlined_methods++;
6398 /* always add some code to avoid block split failures */
6399 MONO_INST_NEW (cfg, ins, OP_NOP);
6400 MONO_ADD_INS (prev_cbb, ins);
6402 prev_cbb->next_bb = sbblock;
6403 link_bblock (cfg, prev_cbb, sbblock);
6406 * Get rid of the begin and end bblocks if possible to aid local
6409 if (prev_cbb->out_count == 1)
6410 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6412 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6413 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6415 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6416 MonoBasicBlock *prev = ebblock->in_bb [0];
6418 if (prev->next_bb == ebblock) {
6419 mono_merge_basic_blocks (cfg, prev, ebblock);
6421 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6422 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6423 cfg->cbb = prev_cbb;
6426 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
6431 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Give rvar a defined value on predecessors that never set it. */
6437 for (i = 0; i < ebblock->in_count; ++i) {
6438 bb = ebblock->in_bb [i];
6440 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6443 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6453 * If the inlined method contains only a throw, then the ret var is not
6454 * set, so set it to a dummy value.
6457 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the return value back onto the evaluation stack. */
6459 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6462 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* --- Abort path: inline was rejected; undo the bblock additions. --- */
6465 if (cfg->verbose_level > 2)
6466 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6467 cfg->exception_type = MONO_EXCEPTION_NONE;
6469 /* This gets rid of the newly added bblocks */
6470 cfg->cbb = prev_cbb;
6472 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6477 * Some of these comments may well be out-of-date.
6478 * Design decisions: we do a single pass over the IL code (and we do bblock
6479 * splitting/merging in the few cases when it's required: a back jump to an IL
6480 * address that was not already seen as bblock starting point).
6481 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6482 * Complex operations are decomposed in simpler ones right away. We need to let the
6483 * arch-specific code peek and poke inside this process somehow (except when the
6484 * optimizations can take advantage of the full semantic info of coarse opcodes).
6485 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6486 * MonoInst->opcode initially is the IL opcode or some simplification of that
6487 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6488 * opcode with value bigger than OP_LAST.
6489 * At this point the IR can be handed over to an interpreter, a dumb code generator
6490 * or to the optimizing code generator that will translate it to SSA form.
6492 * Profiling directed optimizations.
6493 * We may compile by default with few or no optimizations and instrument the code
6494 * or the user may indicate what methods to optimize the most either in a config file
6495 * or through repeated runs where the compiler applies offline the optimizations to
6496 * each method and then decides if it was worth it.
/*
 * IL-stream validation helpers used while converting a method body: each
 * macro bails out through UNVERIFIED / TYPE_LOAD_ERROR (defined elsewhere)
 * when the check fails. 'sp'/'stack_start' delimit the evaluation stack and
 * 'header'/'end'/'num_args' describe the method being compiled.
 */
6499 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6500 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6501 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6502 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6503 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6504 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6505 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6506 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
6508 /* offset from br.s -> br like opcodes */
6509 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL offset IP does not start a different basic block
 * than BB, i.e. either no bblock begins at IP or the one that does is BB
 * itself.
 */
6512 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6514 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6516 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL between START and END: decode each opcode, advance
 * IP by its immediate size, and create basic blocks (via GET_BBLOCK) at every
 * branch target and fall-through point. Bblocks that contain a 'throw' are
 * marked out_of_line so later passes can treat them as cold code.
 */
6520 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6522 unsigned char *ip = start;
6523 unsigned char *target;
6526 MonoBasicBlock *bblock;
6527 const MonoOpcode *opcode;
6530 cli_addr = ip - start;
6531 i = mono_opcode_value ((const guint8 **)&ip, end);
6534 opcode = &mono_opcodes [i];
/* Dispatch on the opcode's immediate-operand kind to find its length
 * and any branch targets. */
6535 switch (opcode->argument) {
6536 case MonoInlineNone:
6539 case MonoInlineString:
6540 case MonoInlineType:
6541 case MonoInlineField:
6542 case MonoInlineMethod:
6545 case MonoShortInlineR:
6552 case MonoShortInlineVar:
6553 case MonoShortInlineI:
6556 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next opcode. */
6557 target = start + cli_addr + 2 + (signed char)ip [1];
6558 GET_BBLOCK (cfg, bblock, target);
6561 GET_BBLOCK (cfg, bblock, ip);
6563 case MonoInlineBrTarget:
/* 4-byte signed branch displacement, relative to the next opcode. */
6564 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6565 GET_BBLOCK (cfg, bblock, target);
6568 GET_BBLOCK (cfg, bblock, ip);
6570 case MonoInlineSwitch: {
6571 guint32 n = read32 (ip + 1);
/* Skip the count plus the n 4-byte targets; 'target' is then the
 * fall-through instruction after the switch. */
6574 cli_addr += 5 + 4 * n;
6575 target = start + cli_addr;
6576 GET_BBLOCK (cfg, bblock, target);
6578 for (j = 0; j < n; ++j) {
6579 target = start + cli_addr + (gint32)read32 (ip);
6580 GET_BBLOCK (cfg, bblock, target);
6590 g_assert_not_reached ();
6593 if (i == CEE_THROW) {
6594 unsigned char *bb_start = ip - 1;
6596 /* Find the start of the bblock containing the throw */
6598 while ((bb_start >= start) && !bblock) {
6599 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot code path. */
6603 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the scope of M. For wrapper methods the
 * token indexes the wrapper's own data table and the result is then inflated
 * with CONTEXT; otherwise the token is looked up in M's image. May return a
 * method with open (uninstantiated) generic parameters — callers that cannot
 * handle that use mini_get_method () below. Errors are reported via ERROR.
 */
6613 static inline MonoMethod *
6614 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
6620 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6621 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
6623 method = mono_class_inflate_generic_method_checked (method, context, error);
6626 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when compiling non-gshared code a
 * resolved method on an open constructed type is rejected as a bad-image
 * error. CFG may be NULL (used during pre-pass scanning); in that case a
 * local MonoError is used and any failure is swallowed.
 */
6632 static inline MonoMethod *
6633 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6636 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
6638 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
6639 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
6643 if (!method && !cfg)
6644 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the scope of METHOD, inflating with
 * CONTEXT. Wrapper methods resolve through their wrapper data table; normal
 * methods go through the image's typespec lookup. The class is initialized
 * before returning. Lookup errors are currently dropped (see FIXMEs).
 */
6650 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6655 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6656 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
6658 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
6659 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6662 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6663 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6666 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the scope of METHOD (wrapper
 * data table for wrappers, metadata parse otherwise) and inflate it with
 * CONTEXT. Returns NULL with ERROR set on failure.
 */
6670 static inline MonoMethodSignature*
6671 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
6673 MonoMethodSignature *fsig;
6676 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6677 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6679 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
6680 return_val_if_nok (error, NULL);
6683 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a static) the
 * SecurityManager.ThrowException(exception) helper method used to raise
 * CoreCLR security violations from JITted code.
 */
6689 throw_exception (void)
6691 static MonoMethod *method = NULL;
6694 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6695 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-constructed exception object EX at runtime by
 * calling SecurityManager.ThrowException () with EX as a pointer constant.
 */
6702 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6704 MonoMethod *thrower = throw_exception ();
6707 EMIT_NEW_PCONST (cfg, args [0], ex);
6708 mono_emit_method_call (cfg, thrower, args, NULL);
6712 * Return the original method if a wrapper is specified. We can only access
6713 * the custom attributes from the original method.
6716 get_original_method (MonoMethod *method)
/* Non-wrappers are already the original method (returned in the elided line). */
6718 if (method->wrapper_type == MONO_WRAPPER_NONE)
6721 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6722 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6725 /* in other cases we need to find the original method */
6726 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for field access: if CALLER (unwrapped via
 *   get_original_method ()) is not allowed to access FIELD, emit code
 *   that throws the returned security exception at runtime.
 */
6730 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
6732 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6733 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6735 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for calls: if CALLER (unwrapped via
 *   get_original_method ()) may not call CALLEE, emit code that throws
 *   the returned security exception at runtime.
 */
6739 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6741 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6742 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6744 emit_throw_exception (cfg, ex);
6748 * Check that the IL instructions at ip are the array initialization
6749 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-matches the canonical "dup; ldtoken <field>; call
 *   RuntimeHelpers.InitializeArray" sequence emitted after newarr, so
 *   the JIT can replace the call with a direct copy from the field's
 *   RVA data. Also fills *out_size and *out_field_token.
 */
6752 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6755 * newarr[System.Int32]
6757 * ldtoken field valuetype ...
6758 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the token-table byte of the ldtoken operand (a Field token). */
6760 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6762 guint32 token = read32 (ip + 7);
6763 guint32 field_token = read32 (ip + 2);
6764 guint32 field_index = field_token & 0xffffff;
6766 const char *data_ptr;
6768 MonoMethod *cmethod;
6769 MonoClass *dummy_class;
6770 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6774 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6778 *out_field_token = field_token;
6780 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize the real corlib RuntimeHelpers.InitializeArray, nothing else. */
6783 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6785 switch (mini_get_underlying_type (&klass->byval_arg)->type) {
6789 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6790 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
/* Reject sequences whose computed array size exceeds the field's data size. */
6807 if (size > mono_type_size (field->type, &dummy_align))
6810 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6811 if (!image_is_dynamic (method->klass->image)) {
6812 field_index = read32 (ip + 2) & 0xffffff;
6813 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6814 data_ptr = mono_image_rva_map (method->klass->image, rva);
6815 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6816 /* for aot code we do the lookup on load */
6817 if (aot && data_ptr)
6818 return (const char *)GUINT_TO_POINTER (rva);
6820 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA mapping; read the data straight from the field. */
6822 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG describing the invalid IL
 *   found at IP in METHOD, including a disassembly of the offending
 *   instruction (or an explanation of why the body could not be parsed).
 */
6830 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6833 char *method_fname = mono_method_full_name (method, TRUE);
6835 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
6838 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
6839 mono_error_cleanup (&error);
6840 } else if (header->code_size == 0)
6841 method_code = g_strdup ("method body is empty.");
6843 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6844 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
6845 g_free (method_fname);
6846 g_free (method_code);
/* The header is freed later, together with the compile's mempool. */
6847 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *   Emit the store for CIL stloc N from the value on top of the stack
 *   (SP [0]). When the store would be a plain reg-reg move of a fresh
 *   constant, the move is optimized away by retargeting the constant's
 *   destination register to the local's register.
 */
6851 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6854 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6855 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6856 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6857 /* Optimize reg-reg moves away */
6859 * Can't optimize other opcodes, since sp[0] might point to
6860 * the last ins of a decomposed opcode.
6862 sp [0]->dreg = (cfg)->locals [n]->dreg;
6864 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6869 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Recognize "ldloca N; initobj <type>" and replace it with a direct
 *   local initialization, avoiding taking the local's address. SIZE is
 *   the ldloca encoding size; returns the IP past the consumed pattern
 *   (elided in this listing) or indicates no match.
 */
6872 static inline unsigned char *
6873 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6883 local = read16 (ip + 2);
/* Only fold when the following initobj is inside the same basic block. */
6887 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6888 /* From the INITOBJ case */
6889 token = read32 (ip + 2);
6890 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6891 CHECK_TYPELOAD (klass);
6892 type = mini_get_underlying_type (&klass->byval_arg);
6893 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *   Emit a virtual/interface call for llvm-only mode, where vtable and
 *   IMT slots hold function descriptors (addr + arg pairs) instead of
 *   code addresses. Dispatches among: plain virtual call, simple
 *   interface call, generic-virtual/variant-interface call via lazily
 *   populated IMT thunks, and a gsharedvt fallback through runtime
 *   icalls. NOTE(review): interior lines are elided in this listing.
 */
6901 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
6903 MonoInst *icall_args [16];
6904 MonoInst *call_target, *ins, *vtable_ins;
6905 int arg_reg, this_reg, vtable_reg;
6906 gboolean is_iface = mono_class_is_interface (cmethod->klass);
6907 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
6908 gboolean variant_iface = FALSE;
6911 gboolean special_array_interface = cmethod->klass->is_array_special_interface;
6914 * In llvm-only mode, vtables contain function descriptors instead of
6915 * method addresses/trampolines.
/* A virtual call always dereferences 'this'; make the null check explicit. */
6917 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
6920 slot = mono_method_get_imt_slot (cmethod);
6922 slot = mono_method_get_vtable_index (cmethod);
6924 this_reg = sp [0]->dreg;
6926 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
6927 variant_iface = TRUE;
/* Case 1: a plain (non-generic, non-interface, non-gsharedvt) virtual call. */
6929 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
6931 * The simplest case, a normal virtual call.
6933 int slot_reg = alloc_preg (cfg);
6934 int addr_reg = alloc_preg (cfg);
6935 int arg_reg = alloc_preg (cfg);
6936 MonoBasicBlock *non_null_bb;
6938 vtable_reg = alloc_preg (cfg);
6939 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6940 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
6942 /* Load the vtable slot, which contains a function descriptor. */
6943 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6945 NEW_BBLOCK (cfg, non_null_bb);
/* Fast path: the slot is usually initialized, so branch over the init icall. */
6947 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
6948 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
6949 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
6952 // FIXME: Make the wrapper use the preserveall cconv
6953 // FIXME: Use one icall per slot for small slot numbers ?
6954 icall_args [0] = vtable_ins;
6955 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
6956 /* Make the icall return the vtable slot value to save some code space */
6957 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
6958 ins->dreg = slot_reg;
6959 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
6962 MONO_START_BB (cfg, non_null_bb);
6963 /* Load the address + arg from the vtable slot */
6964 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6965 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
6967 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/* Case 2: a simple (non-variant, non-generic) interface call through an IMT slot. */
6970 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt && !special_array_interface) {
6972 * A simple interface call
6974 * We make a call through an imt slot to obtain the function descriptor we need to call.
6975 * The imt slot contains a function descriptor for a runtime function + arg.
6977 int slot_reg = alloc_preg (cfg);
6978 int addr_reg = alloc_preg (cfg);
6979 int arg_reg = alloc_preg (cfg);
6980 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
6982 vtable_reg = alloc_preg (cfg);
6983 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper. */
6984 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
6987 * The slot is already initialized when the vtable is created so there is no need
6991 /* Load the imt slot, which contains a function descriptor. */
6992 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
6994 /* Load the address + arg of the imt thunk from the imt slot */
6995 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
6996 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
6998 * IMT thunks in llvm-only mode are C functions which take an info argument
6999 * plus the imt method and return the ftndesc to call.
7001 icall_args [0] = thunk_arg_ins;
7002 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7003 cmethod, MONO_RGCTX_INFO_METHOD);
7004 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7006 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
/* Case 3: generic virtual / variant-interface / special array-interface calls. */
7009 if ((fsig->generic_param_count || variant_iface || special_array_interface) && !is_gsharedvt) {
7011 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7012 * dynamically extended as more instantiations are discovered.
7013 * This handles generic virtual methods both on classes and interfaces.
7015 int slot_reg = alloc_preg (cfg);
7016 int addr_reg = alloc_preg (cfg);
7017 int arg_reg = alloc_preg (cfg);
7018 int ftndesc_reg = alloc_preg (cfg);
7019 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7020 MonoBasicBlock *slowpath_bb, *end_bb;
7022 NEW_BBLOCK (cfg, slowpath_bb);
7023 NEW_BBLOCK (cfg, end_bb);
7025 vtable_reg = alloc_preg (cfg);
7026 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7028 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7030 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7032 /* Load the slot, which contains a function descriptor. */
7033 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7035 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7036 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7038 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7041 /* Same as with iface calls */
7042 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7043 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7044 icall_args [0] = thunk_arg_ins;
7045 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7046 cmethod, MONO_RGCTX_INFO_METHOD);
7047 ftndesc_ins = mini_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7048 ftndesc_ins->dreg = ftndesc_reg;
7050 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7051 * they don't know about yet. Fall back to the slowpath in that case.
7053 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7054 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7056 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target through a runtime icall. */
7059 MONO_START_BB (cfg, slowpath_bb);
7060 icall_args [0] = vtable_ins;
7061 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7062 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7063 cmethod, MONO_RGCTX_INFO_METHOD);
7065 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7067 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7068 ftndesc_ins->dreg = ftndesc_reg;
7069 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7072 MONO_START_BB (cfg, end_bb);
7073 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7077 * Non-optimized cases
7079 icall_args [0] = sp [0];
7080 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7082 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7083 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out-parameter slot the resolver fills with the extra call argument. */
7085 arg_reg = alloc_preg (cfg);
7086 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7087 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
7089 g_assert (is_gsharedvt);
7091 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7093 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7096 * Pass the extra argument even if the callee doesn't receive it, most
7097 * calling conventions allow this.
7099 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *   Walk KLASS's parent chain and report whether it derives from
 *   System.Exception (mono_defaults.exception_class).
 */
7103 is_exception_class (MonoClass *klass)
7106 if (klass == mono_defaults.exception_class)
7108 klass = klass->parent;
7114 * is_jit_optimizer_disabled:
7116 * Determine whenever M's assembly has a DebuggableAttribute with the
7117 * IsJITOptimizerDisabled flag set.
/* The result is cached per-assembly; memory barriers order the cached
 * value before the 'inited' flag so concurrent readers see a consistent pair. */
7120 is_jit_optimizer_disabled (MonoMethod *m)
7123 MonoAssembly *ass = m->klass->image->assembly;
7124 MonoCustomAttrInfo* attrs;
7127 gboolean val = FALSE;
7130 if (ass->jit_optimizer_disabled_inited)
7131 return ass->jit_optimizer_disabled;
7133 klass = mono_class_try_get_debuggable_attribute_class ();
/* No DebuggableAttribute class available: cache "not disabled" and return. */
7137 ass->jit_optimizer_disabled = FALSE;
7138 mono_memory_barrier ();
7139 ass->jit_optimizer_disabled_inited = TRUE;
7143 attrs = mono_custom_attrs_from_assembly_checked (ass, FALSE, &error);
7144 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7146 for (i = 0; i < attrs->num_attrs; ++i) {
7147 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7149 MonoMethodSignature *sig;
7151 if (!attr->ctor || attr->ctor->klass != klass)
7153 /* Decode the attribute. See reflection.c */
7154 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
7155 g_assert (read16 (p) == 0x0001);
7158 // FIXME: Support named parameters
7159 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here. */
7160 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7162 /* Two boolean arguments */
7166 mono_custom_attrs_free (attrs);
7169 ass->jit_optimizer_disabled = val;
7170 mono_memory_barrier ();
7171 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether the call to CMETHOD (signature FSIG, opcode
 *   CALL_OPCODE) made from METHOD can be compiled as a real tail call.
 *   Starts from the architecture's verdict and then vetoes cases where
 *   the callee could observe the caller's (about to vanish) stack frame
 *   or where the wrapper/LMF machinery rules it out.
 */
7177 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7179 gboolean supported_tail_call;
7182 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7184 for (i = 0; i < fsig->param_count; ++i) {
7185 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7186 /* These can point to the current method's stack */
7187 supported_tail_call = FALSE;
7189 if (fsig->hasthis && cmethod->klass->valuetype)
7190 /* this might point to the current method's stack */
7191 supported_tail_call = FALSE;
7192 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7193 supported_tail_call = FALSE;
/* A method that saves the LMF needs its frame alive for unwinding. */
7194 if (cfg->method->save_lmf)
7195 supported_tail_call = FALSE;
7196 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7197 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites are eligible (not callvirt/calli). */
7198 if (call_opcode != CEE_CALL)
7199 supported_tail_call = FALSE;
7201 /* Debugging support */
7203 if (supported_tail_call) {
7204 if (!mono_debug_count ())
7205 supported_tail_call = FALSE;
7209 return supported_tail_call;
7215 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *   Emit the constructor invocation for a NEWOBJ. Chooses among:
 *   inlined intrinsic, regular inlining, a gsharedvt out-trampoline
 *   calli, an indirect rgctx call for unshareable generic contexts, and
 *   a plain direct call. SP [0] is the freshly allocated object.
 *   NOTE(review): interior lines are elided in this listing.
 */
7218 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7219 MonoInst **sp, guint8 *ip, int *inline_costs)
7221 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need an explicit vtable/mrgctx argument. */
7223 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7224 mono_method_is_generic_sharable (cmethod, TRUE)) {
7225 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7226 mono_class_vtable (cfg->domain, cmethod->klass);
7227 CHECK_TYPELOAD (cmethod->klass);
7229 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7230 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7233 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used,
7234 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7236 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7238 CHECK_TYPELOAD (cmethod->klass);
7239 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7244 /* Avoid virtual calls to ctors if possible */
7245 if (mono_class_is_marshalbyref (cmethod->klass))
7246 callvirt_this_arg = sp [0];
/* Intrinsic ctors (e.g. recognized by mini_emit_inst_for_ctor) short-circuit the call. */
7248 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7249 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7250 CHECK_CFG_EXCEPTION;
7251 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7252 mono_method_check_inlining (cfg, cmethod) &&
7253 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7256 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
/* 5 == length of the NEWOBJ opcode + token that the inline replaces. */
7257 cfg->real_offset += 5;
7259 *inline_costs += costs - 5;
7261 INLINE_FAILURE ("inline failure");
7262 // FIXME-VT: Clean this up
7263 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7264 GSHAREDVT_FAILURE(*ip);
7265 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7267 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7270 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7272 if (cfg->llvm_only) {
7273 // FIXME: Avoid initializing vtable_arg
7274 emit_llvmonly_calli (cfg, fsig, sp, addr);
7276 mini_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7278 } else if (context_used &&
7279 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7280 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7281 MonoInst *cmethod_addr;
7283 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7285 if (cfg->llvm_only) {
7286 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
7287 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7288 emit_llvmonly_calli (cfg, fsig, sp, addr);
7290 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7291 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7293 mini_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: an ordinary direct call to the ctor. */
7296 INLINE_FAILURE ("ctor call");
7297 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7298 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *   Emit the IR that stores VAL as the return value of CFG->method.
 *   Valuetype returns (CEE_STOBJ) go either into the return variable or
 *   through the hidden vret address; on soft-float targets an R4 return
 *   is first converted via the mono_fload_r4_arg icall.
 */
7305 emit_setret (MonoCompile *cfg, MonoInst *val)
7307 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
7310 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7313 if (!cfg->vret_addr) {
7314 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
7316 EMIT_NEW_RETLOADA (cfg, ret_addr);
7318 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7319 ins->klass = mono_class_from_mono_type (ret_type);
7322 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
7323 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7324 MonoInst *iargs [1];
7328 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7329 mono_arch_emit_setret (cfg, cfg->method, conv);
7331 mono_arch_emit_setret (cfg, cfg->method, val);
7334 mono_arch_emit_setret (cfg, cfg->method, val);
7340 * mono_method_to_ir:
7342 * Translate the .net IL into linear IR.
7344 * @start_bblock: if not NULL, the starting basic block, used during inlining.
7345 * @end_bblock: if not NULL, the ending basic block, used during inlining.
7346 * @return_var: if not NULL, the place where the return value is stored, used during inlining.
7347 * @inline_args: if not NULL, contains the arguments to the inline call
7348 * @inline_offset: if not zero, the real offset from the inline call, or zero otherwise.
7349 * @is_virtual_call: whether this method is being called as a result of a call to callvirt
7351 * This method is used to turn ECMA IL into Mono's internal Linear IR
7352 * reprensetation. It is used both for entire methods, as well as
7353 * inlining existing methods. In the former case, the @start_bblock,
7354 * @end_bblock, @return_var, @inline_args are all set to NULL, and the
7355 * inline_offset is set to zero.
7357 * Returns: the inline cost, or -1 if there was an error processing this method.
7360 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7361 MonoInst *return_var, MonoInst **inline_args,
7362 guint inline_offset, gboolean is_virtual_call)
7365 MonoInst *ins, **sp, **stack_start;
7366 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7367 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7368 MonoMethod *cmethod, *method_definition;
7369 MonoInst **arg_array;
7370 MonoMethodHeader *header;
7372 guint32 token, ins_flag;
7374 MonoClass *constrained_class = NULL;
7375 unsigned char *ip, *end, *target, *err_pos;
7376 MonoMethodSignature *sig;
7377 MonoGenericContext *generic_context = NULL;
7378 MonoGenericContainer *generic_container = NULL;
7379 MonoType **param_types;
7380 int i, n, start_new_bblock, dreg;
7381 int num_calls = 0, inline_costs = 0;
7382 int breakpoint_id = 0;
7384 GSList *class_inits = NULL;
7385 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7387 gboolean init_locals, seq_points, skip_dead_blocks;
7388 gboolean sym_seq_points = FALSE;
7389 MonoDebugMethodInfo *minfo;
7390 MonoBitSet *seq_point_locs = NULL;
7391 MonoBitSet *seq_point_set_locs = NULL;
7393 cfg->disable_inline = is_jit_optimizer_disabled (method);
7395 /* serialization and xdomain stuff may need access to private fields and methods */
7396 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7397 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7398 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7399 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7400 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7401 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7403 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7404 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7405 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7406 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7407 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7409 image = method->klass->image;
7410 header = mono_method_get_header_checked (method, &cfg->error);
7412 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7413 goto exception_exit;
7415 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7418 generic_container = mono_method_get_generic_container (method);
7419 sig = mono_method_signature (method);
7420 num_args = sig->hasthis + sig->param_count;
7421 ip = (unsigned char*)header->code;
7422 cfg->cil_start = ip;
7423 end = ip + header->code_size;
7424 cfg->stat_cil_code_size += header->code_size;
7426 seq_points = cfg->gen_seq_points && cfg->method == method;
7428 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7429 /* We could hit a seq point before attaching to the JIT (#8338) */
7433 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7434 minfo = mono_debug_lookup_method (method);
7436 MonoSymSeqPoint *sps;
7437 int i, n_il_offsets;
7439 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7440 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7441 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7442 sym_seq_points = TRUE;
7443 for (i = 0; i < n_il_offsets; ++i) {
7444 if (sps [i].il_offset < header->code_size)
7445 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7449 MonoDebugMethodAsyncInfo* asyncMethod = mono_debug_lookup_method_async_debug_info (method);
7451 for (i = 0; asyncMethod != NULL && i < asyncMethod->num_awaits; i++)
7453 mono_bitset_set_fast (seq_point_locs, asyncMethod->resume_offsets[i]);
7454 mono_bitset_set_fast (seq_point_locs, asyncMethod->yield_offsets[i]);
7456 mono_debug_free_method_async_debug_info (asyncMethod);
7458 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7459 /* Methods without line number info like auto-generated property accessors */
7460 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7461 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7462 sym_seq_points = TRUE;
7467 * Methods without init_locals set could cause asserts in various passes
7468 * (#497220). To work around this, we emit dummy initialization opcodes
7469 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7470 * on some platforms.
7472 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7473 init_locals = header->init_locals;
7477 method_definition = method;
7478 while (method_definition->is_inflated) {
7479 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7480 method_definition = imethod->declaring;
7483 /* SkipVerification is not allowed if core-clr is enabled */
7484 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7486 dont_verify_stloc = TRUE;
7489 if (sig->is_inflated)
7490 generic_context = mono_method_get_context (method);
7491 else if (generic_container)
7492 generic_context = &generic_container->context;
7493 cfg->generic_context = generic_context;
7496 g_assert (!sig->has_type_parameters);
7498 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7499 g_assert (method->is_inflated);
7500 g_assert (mono_method_get_context (method)->method_inst);
7502 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7503 g_assert (sig->generic_param_count);
7505 if (cfg->method == method) {
7506 cfg->real_offset = 0;
7508 cfg->real_offset = inline_offset;
7511 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7512 cfg->cil_offset_to_bb_len = header->code_size;
7514 cfg->current_method = method;
7516 if (cfg->verbose_level > 2)
7517 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7519 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7521 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7522 for (n = 0; n < sig->param_count; ++n)
7523 param_types [n + sig->hasthis] = sig->params [n];
7524 cfg->arg_types = param_types;
7526 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7527 if (cfg->method == method) {
7529 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7530 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7533 NEW_BBLOCK (cfg, start_bblock);
7534 cfg->bb_entry = start_bblock;
7535 start_bblock->cil_code = NULL;
7536 start_bblock->cil_length = 0;
7539 NEW_BBLOCK (cfg, end_bblock);
7540 cfg->bb_exit = end_bblock;
7541 end_bblock->cil_code = NULL;
7542 end_bblock->cil_length = 0;
7543 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7544 g_assert (cfg->num_bblocks == 2);
7546 arg_array = cfg->args;
7548 if (header->num_clauses) {
7549 cfg->spvars = g_hash_table_new (NULL, NULL);
7550 cfg->exvars = g_hash_table_new (NULL, NULL);
7552 /* handle exception clauses */
7553 for (i = 0; i < header->num_clauses; ++i) {
7554 MonoBasicBlock *try_bb;
7555 MonoExceptionClause *clause = &header->clauses [i];
7556 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7558 try_bb->real_offset = clause->try_offset;
7559 try_bb->try_start = TRUE;
7560 try_bb->region = ((i + 1) << 8) | clause->flags;
7561 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7562 tblock->real_offset = clause->handler_offset;
7563 tblock->flags |= BB_EXCEPTION_HANDLER;
7566 * Linking the try block with the EH block hinders inlining as we won't be able to
7567 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7569 if (COMPILE_LLVM (cfg))
7570 link_bblock (cfg, try_bb, tblock);
7572 if (*(ip + clause->handler_offset) == CEE_POP)
7573 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7575 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7576 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7577 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7578 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7579 MONO_ADD_INS (tblock, ins);
7581 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
7582 /* finally clauses already have a seq point */
7583 /* seq points for filter clauses are emitted below */
7584 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7585 MONO_ADD_INS (tblock, ins);
7588 /* todo: is a fault block unsafe to optimize? */
7589 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7590 tblock->flags |= BB_EXCEPTION_UNSAFE;
7593 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7595 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7597 /* catch and filter blocks get the exception object on the stack */
7598 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7599 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7601 /* mostly like handle_stack_args (), but just sets the input args */
7602 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7603 tblock->in_scount = 1;
7604 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7605 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7609 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7610 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7611 if (!cfg->compile_llvm) {
7612 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7613 ins->dreg = tblock->in_stack [0]->dreg;
7614 MONO_ADD_INS (tblock, ins);
7617 MonoInst *dummy_use;
7620 * Add a dummy use for the exvar so its liveness info will be
7623 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7626 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7627 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7628 MONO_ADD_INS (tblock, ins);
7631 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7632 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7633 tblock->flags |= BB_EXCEPTION_HANDLER;
7634 tblock->real_offset = clause->data.filter_offset;
7635 tblock->in_scount = 1;
7636 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7637 /* The filter block shares the exvar with the handler block */
7638 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7639 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7640 MONO_ADD_INS (tblock, ins);
7644 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7645 clause->data.catch_class &&
7647 mono_class_check_context_used (clause->data.catch_class)) {
7649 * In shared generic code with catch
7650 * clauses containing type variables
7651 * the exception handling code has to
7652 * be able to get to the rgctx.
7653 * Therefore we have to make sure that
7654 * the vtable/mrgctx argument (for
7655 * static or generic methods) or the
7656 * "this" argument (for non-static
7657 * methods) are live.
7659 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7660 mini_method_get_context (method)->method_inst ||
7661 method->klass->valuetype) {
7662 mono_get_vtable_var (cfg);
7664 MonoInst *dummy_use;
7666 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7671 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7672 cfg->cbb = start_bblock;
7673 cfg->args = arg_array;
7674 mono_save_args (cfg, sig, inline_args);
7677 /* FIRST CODE BLOCK */
7678 NEW_BBLOCK (cfg, tblock);
7679 tblock->cil_code = ip;
7683 ADD_BBLOCK (cfg, tblock);
7685 if (cfg->method == method) {
7686 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7687 if (breakpoint_id) {
7688 MONO_INST_NEW (cfg, ins, OP_BREAK);
7689 MONO_ADD_INS (cfg->cbb, ins);
7693 /* we use a separate basic block for the initialization code */
7694 NEW_BBLOCK (cfg, init_localsbb);
7695 if (cfg->method == method)
7696 cfg->bb_init = init_localsbb;
7697 init_localsbb->real_offset = cfg->real_offset;
7698 start_bblock->next_bb = init_localsbb;
7699 init_localsbb->next_bb = cfg->cbb;
7700 link_bblock (cfg, start_bblock, init_localsbb);
7701 link_bblock (cfg, init_localsbb, cfg->cbb);
7703 cfg->cbb = init_localsbb;
7705 if (cfg->gsharedvt && cfg->method == method) {
7706 MonoGSharedVtMethodInfo *info;
7707 MonoInst *var, *locals_var;
7710 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7711 info->method = cfg->method;
7712 info->count_entries = 16;
7713 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7714 cfg->gsharedvt_info = info;
7716 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7717 /* prevent it from being register allocated */
7718 //var->flags |= MONO_INST_VOLATILE;
7719 cfg->gsharedvt_info_var = var;
7721 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7722 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7724 /* Allocate locals */
7725 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7726 /* prevent it from being register allocated */
7727 //locals_var->flags |= MONO_INST_VOLATILE;
7728 cfg->gsharedvt_locals_var = locals_var;
7730 dreg = alloc_ireg (cfg);
7731 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7733 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7734 ins->dreg = locals_var->dreg;
7736 MONO_ADD_INS (cfg->cbb, ins);
7737 cfg->gsharedvt_locals_var_ins = ins;
7739 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7742 ins->flags |= MONO_INST_INIT;
7746 if (mono_security_core_clr_enabled ()) {
7747 /* check if this is native code, e.g. an icall or a p/invoke */
7748 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7749 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7751 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7752 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7754 /* if this is a native call then it can only be JITted from platform code */
7755 if ((icall || pinvk) && method->klass && method->klass->image) {
7756 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7757 MonoException *ex = icall ? mono_get_exception_security () :
7758 mono_get_exception_method_access ();
7759 emit_throw_exception (cfg, ex);
7766 CHECK_CFG_EXCEPTION;
7768 if (header->code_size == 0)
7771 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7776 if (cfg->method == method)
7777 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
7779 for (n = 0; n < header->num_locals; ++n) {
7780 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7785 /* We force the vtable variable here for all shared methods
7786 for the possibility that they might show up in a stack
7787 trace where their exact instantiation is needed. */
7788 if (cfg->gshared && method == cfg->method) {
7789 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7790 mini_method_get_context (method)->method_inst ||
7791 method->klass->valuetype) {
7792 mono_get_vtable_var (cfg);
7794 /* FIXME: Is there a better way to do this?
7795 We need the variable live for the duration
7796 of the whole method. */
7797 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7801 /* add a check for this != NULL to inlined methods */
7802 if (is_virtual_call) {
7805 NEW_ARGLOAD (cfg, arg_ins, 0);
7806 MONO_ADD_INS (cfg->cbb, arg_ins);
7807 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7810 skip_dead_blocks = !dont_verify;
7811 if (skip_dead_blocks) {
7812 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
7817 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7818 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7821 start_new_bblock = 0;
7823 if (cfg->method == method)
7824 cfg->real_offset = ip - header->code;
7826 cfg->real_offset = inline_offset;
7831 if (start_new_bblock) {
7832 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
7833 if (start_new_bblock == 2) {
7834 g_assert (ip == tblock->cil_code);
7836 GET_BBLOCK (cfg, tblock, ip);
7838 cfg->cbb->next_bb = tblock;
7840 start_new_bblock = 0;
7841 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7842 if (cfg->verbose_level > 3)
7843 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7844 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7848 g_slist_free (class_inits);
7851 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
7852 link_bblock (cfg, cfg->cbb, tblock);
7853 if (sp != stack_start) {
7854 handle_stack_args (cfg, stack_start, sp - stack_start);
7856 CHECK_UNVERIFIABLE (cfg);
7858 cfg->cbb->next_bb = tblock;
7860 for (i = 0; i < cfg->cbb->in_scount; ++i) {
7861 if (cfg->verbose_level > 3)
7862 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
7863 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
7866 g_slist_free (class_inits);
7871 if (skip_dead_blocks) {
7872 int ip_offset = ip - header->code;
7874 if (ip_offset == bb->end)
7878 int op_size = mono_opcode_size (ip, end);
7879 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7881 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7883 if (ip_offset + op_size == bb->end) {
7884 MONO_INST_NEW (cfg, ins, OP_NOP);
7885 MONO_ADD_INS (cfg->cbb, ins);
7886 start_new_bblock = 1;
7894 * Sequence points are points where the debugger can place a breakpoint.
7895 * Currently, we generate these automatically at points where the IL
7898 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7900 * Make methods interruptable at the beginning, and at the targets of
7901 * backward branches.
7902 * Also, do this at the start of every bblock in methods with clauses too,
7903 * to be able to handle instructions with imprecise control flow like
7905 * Backward branches are handled at the end of method-to-ir ().
7907 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7908 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
7910 /* Avoid sequence points on empty IL like .volatile */
7911 // FIXME: Enable this
7912 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7913 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7914 if ((sp != stack_start) && !sym_seq_point)
7915 ins->flags |= MONO_INST_NONEMPTY_STACK;
7916 MONO_ADD_INS (cfg->cbb, ins);
7919 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7922 cfg->cbb->real_offset = cfg->real_offset;
7924 if ((cfg->method == method) && cfg->coverage_info) {
7925 guint32 cil_offset = ip - header->code;
7926 cfg->coverage_info->data [cil_offset].cil_code = ip;
7928 /* TODO: Use an increment here */
7929 #if defined(TARGET_X86)
7930 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7931 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7933 MONO_ADD_INS (cfg->cbb, ins);
7935 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7936 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7940 if (cfg->verbose_level > 3)
7941 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7945 if (seq_points && !sym_seq_points && sp != stack_start) {
7947 * The C# compiler uses these nops to notify the JIT that it should
7948 * insert seq points.
7950 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7951 MONO_ADD_INS (cfg->cbb, ins);
7953 if (cfg->keep_cil_nops)
7954 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7956 MONO_INST_NEW (cfg, ins, OP_NOP);
7958 MONO_ADD_INS (cfg->cbb, ins);
7961 if (should_insert_brekpoint (cfg->method)) {
7962 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7964 MONO_INST_NEW (cfg, ins, OP_NOP);
7967 MONO_ADD_INS (cfg->cbb, ins);
7973 CHECK_STACK_OVF (1);
7974 n = (*ip)-CEE_LDARG_0;
7976 EMIT_NEW_ARGLOAD (cfg, ins, n);
7984 CHECK_STACK_OVF (1);
7985 n = (*ip)-CEE_LDLOC_0;
7987 EMIT_NEW_LOCLOAD (cfg, ins, n);
7996 n = (*ip)-CEE_STLOC_0;
7999 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8001 emit_stloc_ir (cfg, sp, header, n);
8008 CHECK_STACK_OVF (1);
8011 EMIT_NEW_ARGLOAD (cfg, ins, n);
8017 CHECK_STACK_OVF (1);
8020 NEW_ARGLOADA (cfg, ins, n);
8021 MONO_ADD_INS (cfg->cbb, ins);
8031 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8033 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8038 CHECK_STACK_OVF (1);
8041 EMIT_NEW_LOCLOAD (cfg, ins, n);
8045 case CEE_LDLOCA_S: {
8046 unsigned char *tmp_ip;
8048 CHECK_STACK_OVF (1);
8049 CHECK_LOCAL (ip [1]);
8051 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8057 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8066 CHECK_LOCAL (ip [1]);
8067 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8069 emit_stloc_ir (cfg, sp, header, ip [1]);
8074 CHECK_STACK_OVF (1);
8075 EMIT_NEW_PCONST (cfg, ins, NULL);
8076 ins->type = STACK_OBJ;
8081 CHECK_STACK_OVF (1);
8082 EMIT_NEW_ICONST (cfg, ins, -1);
8095 CHECK_STACK_OVF (1);
8096 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8102 CHECK_STACK_OVF (1);
8104 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8110 CHECK_STACK_OVF (1);
8111 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8117 CHECK_STACK_OVF (1);
8118 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8119 ins->type = STACK_I8;
8120 ins->dreg = alloc_dreg (cfg, STACK_I8);
8122 ins->inst_l = (gint64)read64 (ip);
8123 MONO_ADD_INS (cfg->cbb, ins);
8129 gboolean use_aotconst = FALSE;
8131 #ifdef TARGET_POWERPC
8132 /* FIXME: Clean this up */
8133 if (cfg->compile_aot)
8134 use_aotconst = TRUE;
8137 /* FIXME: we should really allocate this only late in the compilation process */
8138 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8140 CHECK_STACK_OVF (1);
8146 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8148 dreg = alloc_freg (cfg);
8149 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8150 ins->type = cfg->r4_stack_type;
8152 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8153 ins->type = cfg->r4_stack_type;
8154 ins->dreg = alloc_dreg (cfg, STACK_R8);
8156 MONO_ADD_INS (cfg->cbb, ins);
8166 gboolean use_aotconst = FALSE;
8168 #ifdef TARGET_POWERPC
8169 /* FIXME: Clean this up */
8170 if (cfg->compile_aot)
8171 use_aotconst = TRUE;
8174 /* FIXME: we should really allocate this only late in the compilation process */
8175 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8177 CHECK_STACK_OVF (1);
8183 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8185 dreg = alloc_freg (cfg);
8186 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8187 ins->type = STACK_R8;
8189 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8190 ins->type = STACK_R8;
8191 ins->dreg = alloc_dreg (cfg, STACK_R8);
8193 MONO_ADD_INS (cfg->cbb, ins);
8202 MonoInst *temp, *store;
8204 CHECK_STACK_OVF (1);
8208 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8209 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8211 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8214 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8227 if (sp [0]->type == STACK_R8)
8228 /* we need to pop the value from the x86 FP stack */
8229 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8234 MonoMethodSignature *fsig;
8237 INLINE_FAILURE ("jmp");
8238 GSHAREDVT_FAILURE (*ip);
8241 if (stack_start != sp)
8243 token = read32 (ip + 1);
8244 /* FIXME: check the signature matches */
8245 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8248 if (cfg->gshared && mono_method_check_context_used (cmethod))
8249 GENERIC_SHARING_FAILURE (CEE_JMP);
8251 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8253 fsig = mono_method_signature (cmethod);
8254 n = fsig->param_count + fsig->hasthis;
8255 if (cfg->llvm_only) {
8258 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8259 for (i = 0; i < n; ++i)
8260 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8261 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8263 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8264 * have to emit a normal return since llvm expects it.
8267 emit_setret (cfg, ins);
8268 MONO_INST_NEW (cfg, ins, OP_BR);
8269 ins->inst_target_bb = end_bblock;
8270 MONO_ADD_INS (cfg->cbb, ins);
8271 link_bblock (cfg, cfg->cbb, end_bblock);
8274 } else if (cfg->backend->have_op_tail_call) {
8275 /* Handle tail calls similarly to calls */
8278 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8279 call->method = cmethod;
8280 call->tail_call = TRUE;
8281 call->signature = mono_method_signature (cmethod);
8282 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8283 call->inst.inst_p0 = cmethod;
8284 for (i = 0; i < n; ++i)
8285 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8287 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
8288 call->vret_var = cfg->vret_addr;
8290 mono_arch_emit_call (cfg, call);
8291 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8292 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8294 for (i = 0; i < num_args; ++i)
8295 /* Prevent arguments from being optimized away */
8296 arg_array [i]->flags |= MONO_INST_VOLATILE;
8298 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8299 ins = (MonoInst*)call;
8300 ins->inst_p0 = cmethod;
8301 MONO_ADD_INS (cfg->cbb, ins);
8305 start_new_bblock = 1;
8310 MonoMethodSignature *fsig;
8313 token = read32 (ip + 1);
8317 //GSHAREDVT_FAILURE (*ip);
8322 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
8325 if (method->dynamic && fsig->pinvoke) {
8329 * This is a call through a function pointer using a pinvoke
8330 * signature. Have to create a wrapper and call that instead.
8331 * FIXME: This is very slow, need to create a wrapper at JIT time
8332 * instead based on the signature.
8334 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8335 EMIT_NEW_PCONST (cfg, args [1], fsig);
8337 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8340 n = fsig->param_count + fsig->hasthis;
8344 //g_assert (!virtual_ || fsig->hasthis);
8348 inline_costs += 10 * num_calls++;
8351 * Making generic calls out of gsharedvt methods.
8352 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8353 * patching gshared method addresses into a gsharedvt method.
8355 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8357 * We pass the address to the gsharedvt trampoline in the rgctx reg
8359 MonoInst *callee = addr;
8361 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8363 GSHAREDVT_FAILURE (*ip);
8367 GSHAREDVT_FAILURE (*ip);
8369 addr = emit_get_rgctx_sig (cfg, context_used,
8370 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8371 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8375 /* Prevent inlining of methods with indirect calls */
8376 INLINE_FAILURE ("indirect call");
8378 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8379 MonoJumpInfoType info_type;
8383 * Instead of emitting an indirect call, emit a direct call
8384 * with the contents of the aotconst as the patch info.
8386 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8387 info_type = (MonoJumpInfoType)addr->inst_c1;
8388 info_data = addr->inst_p0;
8390 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
8391 info_data = addr->inst_right->inst_left;
8394 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
8395 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
8398 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8399 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8404 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8408 /* End of call, INS should contain the result of the call, if any */
8410 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8412 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8415 CHECK_CFG_EXCEPTION;
8419 constrained_class = NULL;
8423 case CEE_CALLVIRT: {
8424 MonoInst *addr = NULL;
8425 MonoMethodSignature *fsig = NULL;
8427 int virtual_ = *ip == CEE_CALLVIRT;
8428 gboolean pass_imt_from_rgctx = FALSE;
8429 MonoInst *imt_arg = NULL;
8430 MonoInst *keep_this_alive = NULL;
8431 gboolean pass_vtable = FALSE;
8432 gboolean pass_mrgctx = FALSE;
8433 MonoInst *vtable_arg = NULL;
8434 gboolean check_this = FALSE;
8435 gboolean supported_tail_call = FALSE;
8436 gboolean tail_call = FALSE;
8437 gboolean need_seq_point = FALSE;
8438 guint32 call_opcode = *ip;
8439 gboolean emit_widen = TRUE;
8440 gboolean push_res = TRUE;
8441 gboolean skip_ret = FALSE;
8442 gboolean delegate_invoke = FALSE;
8443 gboolean direct_icall = FALSE;
8444 gboolean constrained_partial_call = FALSE;
8445 MonoMethod *cil_method;
8448 token = read32 (ip + 1);
8452 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8455 cil_method = cmethod;
8457 if (constrained_class) {
8458 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8459 if (!mini_is_gsharedvt_klass (constrained_class)) {
8460 g_assert (!cmethod->klass->valuetype);
8461 if (!mini_type_is_reference (&constrained_class->byval_arg))
8462 constrained_partial_call = TRUE;
8466 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8467 if (cfg->verbose_level > 2)
8468 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8469 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8470 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8472 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8476 if (cfg->verbose_level > 2)
8477 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8479 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8481 * This is needed since get_method_constrained can't find
8482 * the method in klass representing a type var.
8483 * The type var is guaranteed to be a reference type in this
8486 if (!mini_is_gsharedvt_klass (constrained_class))
8487 g_assert (!cmethod->klass->valuetype);
8489 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8494 if (constrained_class->enumtype && !strcmp (cmethod->name, "GetHashCode")) {
8495 /* Use the corresponding method from the base type to avoid boxing */
8496 MonoType *base_type = mono_class_enum_basetype (constrained_class);
8497 g_assert (base_type);
8498 constrained_class = mono_class_from_mono_type (base_type);
8499 cmethod = mono_class_get_method_from_name (constrained_class, cmethod->name, 0);
8504 if (!dont_verify && !cfg->skip_visibility) {
8505 MonoMethod *target_method = cil_method;
8506 if (method->is_inflated) {
8507 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
8510 if (!mono_method_can_access_method (method_definition, target_method) &&
8511 !mono_method_can_access_method (method, cil_method))
8512 emit_method_access_failure (cfg, method, cil_method);
8515 if (mono_security_core_clr_enabled ())
8516 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8518 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8519 /* MS.NET seems to silently convert this to a callvirt */
8524 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8525 * converts to a callvirt.
8527 * tests/bug-515884.il is an example of this behavior
8529 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8530 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8531 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8535 if (!cmethod->klass->inited)
8536 if (!mono_class_init (cmethod->klass))
8537 TYPE_LOAD_ERROR (cmethod->klass);
8539 fsig = mono_method_signature (cmethod);
8542 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8543 mini_class_is_system_array (cmethod->klass)) {
8544 array_rank = cmethod->klass->rank;
8545 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8546 direct_icall = TRUE;
8547 } else if (fsig->pinvoke) {
8548 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8549 fsig = mono_method_signature (wrapper);
8550 } else if (constrained_class) {
8552 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8556 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
8557 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
8559 /* See code below */
8560 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8561 MonoBasicBlock *tbb;
8563 GET_BBLOCK (cfg, tbb, ip + 5);
8564 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8566 * We want to extend the try block to cover the call, but we can't do it if the
8567 * call is made directly since it's followed by an exception check.
8569 direct_icall = FALSE;
8573 mono_save_token_info (cfg, image, token, cil_method);
8575 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8576 need_seq_point = TRUE;
8578 /* Don't support calls made using type arguments for now */
8580 if (cfg->gsharedvt) {
8581 if (mini_is_gsharedvt_signature (fsig))
8582 GSHAREDVT_FAILURE (*ip);
8586 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8587 g_assert_not_reached ();
8589 n = fsig->param_count + fsig->hasthis;
8591 if (!cfg->gshared && mono_class_is_gtd (cmethod->klass))
8595 g_assert (!mono_method_check_context_used (cmethod));
8599 //g_assert (!virtual_ || fsig->hasthis);
8603 if (cmethod && cmethod->klass->image == mono_defaults.corlib && !strcmp (cmethod->klass->name, "ThrowHelper"))
8604 cfg->cbb->out_of_line = TRUE;
8607 * We have the `constrained.' prefix opcode.
8609 if (constrained_class) {
8610 if (mini_is_gsharedvt_klass (constrained_class)) {
8611 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8612 /* The 'Own method' case below */
8613 } else if (cmethod->klass->image != mono_defaults.corlib && !mono_class_is_interface (cmethod->klass) && !cmethod->klass->valuetype) {
8614 /* 'The type parameter is instantiated as a reference type' case below. */
8616 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8617 CHECK_CFG_EXCEPTION;
8623 if (constrained_partial_call) {
8624 gboolean need_box = TRUE;
8627 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8628 * called method is not known at compile time either. The called method could end up being
8629 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8630 * to box the receiver.
8631 * A simple solution would be to box always and make a normal virtual call, but that would
8632 * be bad performance wise.
8634 if (mono_class_is_interface (cmethod->klass) && mono_class_is_ginst (cmethod->klass)) {
8636 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8641 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8642 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8643 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8644 ins->klass = constrained_class;
8645 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8646 CHECK_CFG_EXCEPTION;
8647 } else if (need_box) {
8649 MonoBasicBlock *is_ref_bb, *end_bb;
8650 MonoInst *nonbox_call;
8653 * Determine at runtime whenever the called method is defined on object/valuetype/enum, and emit a boxing call
8655 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8656 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8658 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8660 NEW_BBLOCK (cfg, is_ref_bb);
8661 NEW_BBLOCK (cfg, end_bb);
8663 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
8665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8668 nonbox_call = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8673 MONO_START_BB (cfg, is_ref_bb);
8674 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8675 ins->klass = constrained_class;
8676 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8677 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8679 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8681 MONO_START_BB (cfg, end_bb);
8684 nonbox_call->dreg = ins->dreg;
8687 g_assert (mono_class_is_interface (cmethod->klass));
8688 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8689 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8692 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8694 * The type parameter is instantiated as a valuetype,
8695 * but that type doesn't override the method we're
8696 * calling, so we need to box `this'.
8698 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8699 ins->klass = constrained_class;
8700 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8701 CHECK_CFG_EXCEPTION;
8702 } else if (!constrained_class->valuetype) {
8703 int dreg = alloc_ireg_ref (cfg);
8706 * The type parameter is instantiated as a reference
8707 * type. We have a managed pointer on the stack, so
8708 * we need to dereference it here.
8710 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8711 ins->type = STACK_OBJ;
8714 if (cmethod->klass->valuetype) {
8717 /* Interface method */
8720 mono_class_setup_vtable (constrained_class);
8721 CHECK_TYPELOAD (constrained_class);
8722 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8724 TYPE_LOAD_ERROR (constrained_class);
8725 slot = mono_method_get_vtable_slot (cmethod);
8727 TYPE_LOAD_ERROR (cmethod->klass);
8728 cmethod = constrained_class->vtable [ioffset + slot];
8730 if (cmethod->klass == mono_defaults.enum_class) {
8731 /* Enum implements some interfaces, so treat this as the first case */
8732 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8733 ins->klass = constrained_class;
8734 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8735 CHECK_CFG_EXCEPTION;
8740 constrained_class = NULL;
8743 if (check_call_signature (cfg, fsig, sp))
8746 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8747 delegate_invoke = TRUE;
8749 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8750 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8751 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8759 * If the callee is a shared method, then its static cctor
8760 * might not get called after the call was patched.
8762 if (cfg->gshared && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8763 emit_class_init (cfg, cmethod->klass);
8764 CHECK_TYPELOAD (cmethod->klass);
8767 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8770 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8772 context_used = mini_method_check_context_used (cfg, cmethod);
8774 if (context_used && mono_class_is_interface (cmethod->klass)) {
8775 /* Generic method interface
8776 calls are resolved via a
8777 helper function and don't
8779 if (!cmethod_context || !cmethod_context->method_inst)
8780 pass_imt_from_rgctx = TRUE;
8784 * If a shared method calls another
8785 * shared method then the caller must
8786 * have a generic sharing context
8787 * because the magic trampoline
8788 * requires it. FIXME: We shouldn't
8789 * have to force the vtable/mrgctx
8790 * variable here. Instead there
8791 * should be a flag in the cfg to
8792 * request a generic sharing context.
8795 ((cfg->method->flags & METHOD_ATTRIBUTE_STATIC) || cfg->method->klass->valuetype))
8796 mono_get_vtable_var (cfg);
8801 vtable_arg = mini_emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8803 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8805 CHECK_TYPELOAD (cmethod->klass);
8806 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8811 g_assert (!vtable_arg);
8813 if (!cfg->compile_aot) {
8815 * emit_get_rgctx_method () calls mono_class_vtable () so check
8816 * for type load errors before.
8818 mono_class_setup_vtable (cmethod->klass);
8819 CHECK_TYPELOAD (cmethod->klass);
8822 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8824 /* !marshalbyref is needed to properly handle generic methods + remoting */
8825 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8826 MONO_METHOD_IS_FINAL (cmethod)) &&
8827 !mono_class_is_marshalbyref (cmethod->klass)) {
8834 if (pass_imt_from_rgctx) {
8835 g_assert (!pass_vtable);
8837 imt_arg = emit_get_rgctx_method (cfg, context_used,
8838 cmethod, MONO_RGCTX_INFO_METHOD);
8842 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8844 /* Calling virtual generic methods */
8845 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8846 !(MONO_METHOD_IS_FINAL (cmethod) &&
8847 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8848 fsig->generic_param_count &&
8849 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
8851 MonoInst *this_temp, *this_arg_temp, *store;
8852 MonoInst *iargs [4];
8854 g_assert (fsig->is_inflated);
8856 /* Prevent inlining of methods that contain indirect calls */
8857 INLINE_FAILURE ("virtual generic call");
8859 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8860 GSHAREDVT_FAILURE (*ip);
8862 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
8863 g_assert (!imt_arg);
8865 g_assert (cmethod->is_inflated);
8866 imt_arg = emit_get_rgctx_method (cfg, context_used,
8867 cmethod, MONO_RGCTX_INFO_METHOD);
8868 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8870 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8871 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8872 MONO_ADD_INS (cfg->cbb, store);
8874 /* FIXME: This should be a managed pointer */
8875 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8877 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8878 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8879 cmethod, MONO_RGCTX_INFO_METHOD);
8880 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8881 addr = mono_emit_jit_icall (cfg,
8882 mono_helper_compile_generic_method, iargs);
8884 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8886 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8893 * Implement a workaround for the inherent races involved in locking:
8899 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8900 * try block, the Exit () won't be executed, see:
8901 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8902 * To work around this, we extend such try blocks to include the last x bytes
8903 * of the Monitor.Enter () call.
8905 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8906 MonoBasicBlock *tbb;
8908 GET_BBLOCK (cfg, tbb, ip + 5);
8910 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8911 * from Monitor.Enter like ArgumentNullException.
8913 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8914 /* Mark this bblock as needing to be extended */
8915 tbb->extend_try_block = TRUE;
8919 /* Conversion to a JIT intrinsic */
8920 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8921 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8922 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8930 if ((cfg->opt & MONO_OPT_INLINE) &&
8931 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8932 mono_method_check_inlining (cfg, cmethod)) {
8934 gboolean always = FALSE;
8936 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8937 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8938 /* Prevent inlining of methods that call wrappers */
8939 INLINE_FAILURE ("wrapper call");
8940 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
8944 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
8946 cfg->real_offset += 5;
8948 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8949 /* *sp is already set by inline_method */
8954 inline_costs += costs;
8960 /* Tail recursion elimination */
8961 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8962 gboolean has_vtargs = FALSE;
8965 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8966 INLINE_FAILURE ("tail call");
8968 /* keep it simple */
8969 for (i = fsig->param_count - 1; i >= 0; i--) {
8970 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8975 if (need_seq_point) {
8976 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8977 need_seq_point = FALSE;
8979 for (i = 0; i < n; ++i)
8980 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8981 MONO_INST_NEW (cfg, ins, OP_BR);
8982 MONO_ADD_INS (cfg->cbb, ins);
8983 tblock = start_bblock->out_bb [0];
8984 link_bblock (cfg, cfg->cbb, tblock);
8985 ins->inst_target_bb = tblock;
8986 start_new_bblock = 1;
8988 /* skip the CEE_RET, too */
8989 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
8996 inline_costs += 10 * num_calls++;
8999 * Synchronized wrappers.
9000 * It's hard to determine where to replace a method with its synchronized
9001 * wrapper without causing an infinite recursion. The current solution is
9002 * to add the synchronized wrapper in the trampolines, and to
9003 * change the called method to a dummy wrapper, and resolve that wrapper
9004 * to the real method in mono_jit_compile_method ().
9006 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9007 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9008 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9009 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9013 * Making generic calls out of gsharedvt methods.
9014 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9015 * patching gshared method addresses into a gsharedvt method.
9017 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || mono_class_is_ginst (cmethod->klass)) &&
9018 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9019 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9020 MonoRgctxInfoType info_type;
9023 //if (mono_class_is_interface (cmethod->klass))
9024 //GSHAREDVT_FAILURE (*ip);
9025 // disable for possible remoting calls
9026 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9027 GSHAREDVT_FAILURE (*ip);
9028 if (fsig->generic_param_count) {
9029 /* virtual generic call */
9030 g_assert (!imt_arg);
9031 /* Same as the virtual generic case above */
9032 imt_arg = emit_get_rgctx_method (cfg, context_used,
9033 cmethod, MONO_RGCTX_INFO_METHOD);
9034 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9036 } else if (mono_class_is_interface (cmethod->klass) && !imt_arg) {
9037 /* This can happen when we call a fully instantiated iface method */
9038 imt_arg = emit_get_rgctx_method (cfg, context_used,
9039 cmethod, MONO_RGCTX_INFO_METHOD);
9044 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9045 keep_this_alive = sp [0];
9047 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9048 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9050 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9051 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9053 if (cfg->llvm_only) {
9054 // FIXME: Avoid initializing vtable_arg
9055 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9057 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9062 /* Generic sharing */
9065 * Use this if the callee is gsharedvt sharable too, since
9066 * at runtime we might find an instantiation so the call cannot
9067 * be patched (the 'no_patch' code path in mini-trampolines.c).
9069 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9070 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9071 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9072 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9073 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9074 INLINE_FAILURE ("gshared");
9076 g_assert (cfg->gshared && cmethod);
9080 * We are compiling a call to a
9081 * generic method from shared code,
9082 * which means that we have to look up
9083 * the method in the rgctx and do an
9087 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9089 if (cfg->llvm_only) {
9090 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9091 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9093 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9094 // FIXME: Avoid initializing imt_arg/vtable_arg
9095 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9097 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9098 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9103 /* Direct calls to icalls */
9105 MonoMethod *wrapper;
9108 /* Inline the wrapper */
9109 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9111 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9112 g_assert (costs > 0);
9113 cfg->real_offset += 5;
9115 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9116 /* *sp is already set by inline_method */
9121 inline_costs += costs;
9130 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9131 MonoInst *val = sp [fsig->param_count];
9133 if (val->type == STACK_OBJ) {
9134 MonoInst *iargs [2];
9139 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9142 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9143 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9144 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !MONO_INS_IS_PCONST_NULL (val))
9145 mini_emit_write_barrier (cfg, addr, val);
9146 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9147 GSHAREDVT_FAILURE (*ip);
9148 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9149 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9151 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9152 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9153 if (!cmethod->klass->element_class->valuetype && !readonly)
9154 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9155 CHECK_TYPELOAD (cmethod->klass);
9158 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9161 g_assert_not_reached ();
9168 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9172 /* Tail prefix / tail call optimization */
9174 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9175 /* FIXME: runtime generic context pointer for jumps? */
9176 /* FIXME: handle this for generic sharing eventually */
9177 if ((ins_flag & MONO_INST_TAILCALL) &&
9178 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9179 supported_tail_call = TRUE;
9181 if (supported_tail_call) {
9184 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9185 INLINE_FAILURE ("tail call");
9187 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9189 if (cfg->backend->have_op_tail_call) {
9190 /* Handle tail calls similarly to normal calls */
9193 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9195 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9196 call->tail_call = TRUE;
9197 call->method = cmethod;
9198 call->signature = mono_method_signature (cmethod);
9201 * We implement tail calls by storing the actual arguments into the
9202 * argument variables, then emitting a CEE_JMP.
9204 for (i = 0; i < n; ++i) {
9205 /* Prevent argument from being register allocated */
9206 arg_array [i]->flags |= MONO_INST_VOLATILE;
9207 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9209 ins = (MonoInst*)call;
9210 ins->inst_p0 = cmethod;
9211 ins->inst_p1 = arg_array [0];
9212 MONO_ADD_INS (cfg->cbb, ins);
9213 link_bblock (cfg, cfg->cbb, end_bblock);
9214 start_new_bblock = 1;
9216 // FIXME: Eliminate unreachable epilogs
9219 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9220 * only reachable from this call.
9222 GET_BBLOCK (cfg, tblock, ip + 5);
9223 if (tblock == cfg->cbb || tblock->in_count == 0)
9232 * Virtual calls in llvm-only mode.
9234 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
9235 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
9240 if (!(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) && !(cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
9241 INLINE_FAILURE ("call");
9242 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
9243 imt_arg, vtable_arg);
9245 if (tail_call && !cfg->llvm_only) {
9246 link_bblock (cfg, cfg->cbb, end_bblock);
9247 start_new_bblock = 1;
9249 // FIXME: Eliminate unreachable epilogs
9252 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9253 * only reachable from this call.
9255 GET_BBLOCK (cfg, tblock, ip + 5);
9256 if (tblock == cfg->cbb || tblock->in_count == 0)
9263 /* End of call, INS should contain the result of the call, if any */
9265 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9268 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9273 if (keep_this_alive) {
9274 MonoInst *dummy_use;
9276 /* See mono_emit_method_call_full () */
9277 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9280 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
9282 * Clang can convert these calls to tail calls which screw up the stack
9283 * walk. This happens even when the -fno-optimize-sibling-calls
9284 * option is passed to clang.
9285 * Work around this by emitting a dummy call.
9287 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
9290 CHECK_CFG_EXCEPTION;
9294 g_assert (*ip == CEE_RET);
9298 constrained_class = NULL;
9300 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9304 if (cfg->method != method) {
9305 /* return from inlined method */
9307 * If in_count == 0, that means the ret is unreachable due to
9308 * being preceded by a throw. In that case, inline_method () will
9309 * handle setting the return value
9310 * (test case: test_0_inline_throw ()).
9312 if (return_var && cfg->cbb->in_count) {
9313 MonoType *ret_type = mono_method_signature (method)->ret;
9319 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9322 //g_assert (returnvar != -1);
9323 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9324 cfg->ret_var_set = TRUE;
9327 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9329 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9333 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9335 if (seq_points && !sym_seq_points) {
9337 * Place a seq point here too even though the IL stack is not
9338 * empty, so a step over on
9341 * will work correctly.
9343 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9344 MONO_ADD_INS (cfg->cbb, ins);
9347 g_assert (!return_var);
9351 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9354 emit_setret (cfg, *sp);
9357 if (sp != stack_start)
9359 MONO_INST_NEW (cfg, ins, OP_BR);
9361 ins->inst_target_bb = end_bblock;
9362 MONO_ADD_INS (cfg->cbb, ins);
9363 link_bblock (cfg, cfg->cbb, end_bblock);
9364 start_new_bblock = 1;
9368 MONO_INST_NEW (cfg, ins, OP_BR);
9370 target = ip + 1 + (signed char)(*ip);
9372 GET_BBLOCK (cfg, tblock, target);
9373 link_bblock (cfg, cfg->cbb, tblock);
9374 ins->inst_target_bb = tblock;
9375 if (sp != stack_start) {
9376 handle_stack_args (cfg, stack_start, sp - stack_start);
9378 CHECK_UNVERIFIABLE (cfg);
9380 MONO_ADD_INS (cfg->cbb, ins);
9381 start_new_bblock = 1;
9382 inline_costs += BRANCH_COST;
9396 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9398 target = ip + 1 + *(signed char*)ip;
9404 inline_costs += BRANCH_COST;
9408 MONO_INST_NEW (cfg, ins, OP_BR);
9411 target = ip + 4 + (gint32)read32(ip);
9413 GET_BBLOCK (cfg, tblock, target);
9414 link_bblock (cfg, cfg->cbb, tblock);
9415 ins->inst_target_bb = tblock;
9416 if (sp != stack_start) {
9417 handle_stack_args (cfg, stack_start, sp - stack_start);
9419 CHECK_UNVERIFIABLE (cfg);
9422 MONO_ADD_INS (cfg->cbb, ins);
9424 start_new_bblock = 1;
9425 inline_costs += BRANCH_COST;
9432 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9433 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9434 guint32 opsize = is_short ? 1 : 4;
9436 CHECK_OPSIZE (opsize);
9438 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9441 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9446 GET_BBLOCK (cfg, tblock, target);
9447 link_bblock (cfg, cfg->cbb, tblock);
9448 GET_BBLOCK (cfg, tblock, ip);
9449 link_bblock (cfg, cfg->cbb, tblock);
9451 if (sp != stack_start) {
9452 handle_stack_args (cfg, stack_start, sp - stack_start);
9453 CHECK_UNVERIFIABLE (cfg);
9456 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9457 cmp->sreg1 = sp [0]->dreg;
9458 type_from_op (cfg, cmp, sp [0], NULL);
9461 #if SIZEOF_REGISTER == 4
9462 if (cmp->opcode == OP_LCOMPARE_IMM) {
9463 /* Convert it to OP_LCOMPARE */
9464 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9465 ins->type = STACK_I8;
9466 ins->dreg = alloc_dreg (cfg, STACK_I8);
9468 MONO_ADD_INS (cfg->cbb, ins);
9469 cmp->opcode = OP_LCOMPARE;
9470 cmp->sreg2 = ins->dreg;
9473 MONO_ADD_INS (cfg->cbb, cmp);
9475 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9476 type_from_op (cfg, ins, sp [0], NULL);
9477 MONO_ADD_INS (cfg->cbb, ins);
9478 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9479 GET_BBLOCK (cfg, tblock, target);
9480 ins->inst_true_bb = tblock;
9481 GET_BBLOCK (cfg, tblock, ip);
9482 ins->inst_false_bb = tblock;
9483 start_new_bblock = 2;
9486 inline_costs += BRANCH_COST;
9501 MONO_INST_NEW (cfg, ins, *ip);
9503 target = ip + 4 + (gint32)read32(ip);
9509 inline_costs += BRANCH_COST;
9513 MonoBasicBlock **targets;
9514 MonoBasicBlock *default_bblock;
9515 MonoJumpInfoBBTable *table;
9516 int offset_reg = alloc_preg (cfg);
9517 int target_reg = alloc_preg (cfg);
9518 int table_reg = alloc_preg (cfg);
9519 int sum_reg = alloc_preg (cfg);
9520 gboolean use_op_switch;
9524 n = read32 (ip + 1);
9527 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9531 CHECK_OPSIZE (n * sizeof (guint32));
9532 target = ip + n * sizeof (guint32);
9534 GET_BBLOCK (cfg, default_bblock, target);
9535 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9537 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9538 for (i = 0; i < n; ++i) {
9539 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9540 targets [i] = tblock;
9541 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9545 if (sp != stack_start) {
9547 * Link the current bb with the targets as well, so handle_stack_args
9548 * will set their in_stack correctly.
9550 link_bblock (cfg, cfg->cbb, default_bblock);
9551 for (i = 0; i < n; ++i)
9552 link_bblock (cfg, cfg->cbb, targets [i]);
9554 handle_stack_args (cfg, stack_start, sp - stack_start);
9556 CHECK_UNVERIFIABLE (cfg);
9558 /* Undo the links */
9559 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9560 for (i = 0; i < n; ++i)
9561 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9567 for (i = 0; i < n; ++i)
9568 link_bblock (cfg, cfg->cbb, targets [i]);
9570 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9571 table->table = targets;
9572 table->table_size = n;
9574 use_op_switch = FALSE;
9576 /* ARM implements SWITCH statements differently */
9577 /* FIXME: Make it use the generic implementation */
9578 if (!cfg->compile_aot)
9579 use_op_switch = TRUE;
9582 if (COMPILE_LLVM (cfg))
9583 use_op_switch = TRUE;
9585 cfg->cbb->has_jump_table = 1;
9587 if (use_op_switch) {
9588 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9589 ins->sreg1 = src1->dreg;
9590 ins->inst_p0 = table;
9591 ins->inst_many_bb = targets;
9592 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
9593 MONO_ADD_INS (cfg->cbb, ins);
9595 if (sizeof (gpointer) == 8)
9596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9600 #if SIZEOF_REGISTER == 8
9601 /* The upper word might not be zero, and we add it to a 64 bit address later */
9602 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9605 if (cfg->compile_aot) {
9606 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9608 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9609 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9610 ins->inst_p0 = table;
9611 ins->dreg = table_reg;
9612 MONO_ADD_INS (cfg->cbb, ins);
9615 /* FIXME: Use load_memindex */
9616 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9618 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9620 start_new_bblock = 1;
9621 inline_costs += (BRANCH_COST * 2);
9638 ins = mini_emit_memory_load (cfg, &ldind_to_type (*ip)->byval_arg, sp [0], 0, ins_flag);
9654 if (ins_flag & MONO_INST_VOLATILE) {
9655 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9656 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9659 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9660 ins->flags |= ins_flag;
9663 MONO_ADD_INS (cfg->cbb, ins);
9665 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !MONO_INS_IS_PCONST_NULL (sp [1]))
9666 mini_emit_write_barrier (cfg, sp [0], sp [1]);
9675 MONO_INST_NEW (cfg, ins, (*ip));
9677 ins->sreg1 = sp [0]->dreg;
9678 ins->sreg2 = sp [1]->dreg;
9679 type_from_op (cfg, ins, sp [0], sp [1]);
9681 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9683 /* Use the immediate opcodes if possible */
9684 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9685 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9686 if (imm_opcode != -1) {
9687 ins->opcode = imm_opcode;
9688 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9691 NULLIFY_INS (sp [1]);
9695 MONO_ADD_INS ((cfg)->cbb, (ins));
9697 *sp++ = mono_decompose_opcode (cfg, ins);
9714 MONO_INST_NEW (cfg, ins, (*ip));
9716 ins->sreg1 = sp [0]->dreg;
9717 ins->sreg2 = sp [1]->dreg;
9718 type_from_op (cfg, ins, sp [0], sp [1]);
9720 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9721 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
9723 /* FIXME: Pass opcode to is_inst_imm */
9725 /* Use the immediate opcodes if possible */
9726 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9727 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9728 if (imm_opcode != -1) {
9729 ins->opcode = imm_opcode;
9730 if (sp [1]->opcode == OP_I8CONST) {
9731 #if SIZEOF_REGISTER == 8
9732 ins->inst_imm = sp [1]->inst_l;
9734 ins->inst_ls_word = sp [1]->inst_ls_word;
9735 ins->inst_ms_word = sp [1]->inst_ms_word;
9739 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9742 /* Might be followed by an instruction added by add_widen_op */
9743 if (sp [1]->next == NULL)
9744 NULLIFY_INS (sp [1]);
9747 MONO_ADD_INS ((cfg)->cbb, (ins));
9749 *sp++ = mono_decompose_opcode (cfg, ins);
9762 case CEE_CONV_OVF_I8:
9763 case CEE_CONV_OVF_U8:
9767 /* Special case this earlier so we have long constants in the IR */
9768 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9769 int data = sp [-1]->inst_c0;
9770 sp [-1]->opcode = OP_I8CONST;
9771 sp [-1]->type = STACK_I8;
9772 #if SIZEOF_REGISTER == 8
9773 if ((*ip) == CEE_CONV_U8)
9774 sp [-1]->inst_c0 = (guint32)data;
9776 sp [-1]->inst_c0 = data;
9778 sp [-1]->inst_ls_word = data;
9779 if ((*ip) == CEE_CONV_U8)
9780 sp [-1]->inst_ms_word = 0;
9782 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9784 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9791 case CEE_CONV_OVF_I4:
9792 case CEE_CONV_OVF_I1:
9793 case CEE_CONV_OVF_I2:
9794 case CEE_CONV_OVF_I:
9795 case CEE_CONV_OVF_U:
9798 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9799 ADD_UNOP (CEE_CONV_OVF_I8);
9806 case CEE_CONV_OVF_U1:
9807 case CEE_CONV_OVF_U2:
9808 case CEE_CONV_OVF_U4:
9811 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
9812 ADD_UNOP (CEE_CONV_OVF_U8);
9819 case CEE_CONV_OVF_I1_UN:
9820 case CEE_CONV_OVF_I2_UN:
9821 case CEE_CONV_OVF_I4_UN:
9822 case CEE_CONV_OVF_I8_UN:
9823 case CEE_CONV_OVF_U1_UN:
9824 case CEE_CONV_OVF_U2_UN:
9825 case CEE_CONV_OVF_U4_UN:
9826 case CEE_CONV_OVF_U8_UN:
9827 case CEE_CONV_OVF_I_UN:
9828 case CEE_CONV_OVF_U_UN:
9835 CHECK_CFG_EXCEPTION;
9839 case CEE_ADD_OVF_UN:
9841 case CEE_MUL_OVF_UN:
9843 case CEE_SUB_OVF_UN:
9849 GSHAREDVT_FAILURE (*ip);
9852 token = read32 (ip + 1);
9853 klass = mini_get_class (method, token, generic_context);
9854 CHECK_TYPELOAD (klass);
9856 if (generic_class_is_reference_type (cfg, klass)) {
9857 MonoInst *store, *load;
9858 int dreg = alloc_ireg_ref (cfg);
9860 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9861 load->flags |= ins_flag;
9862 MONO_ADD_INS (cfg->cbb, load);
9864 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9865 store->flags |= ins_flag;
9866 MONO_ADD_INS (cfg->cbb, store);
9868 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9869 mini_emit_write_barrier (cfg, sp [0], sp [1]);
9871 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9883 token = read32 (ip + 1);
9884 klass = mini_get_class (method, token, generic_context);
9885 CHECK_TYPELOAD (klass);
9887 /* Optimize the common ldobj+stloc combination */
9897 loc_index = ip [5] - CEE_STLOC_0;
9904 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
9905 CHECK_LOCAL (loc_index);
9907 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9908 ins->dreg = cfg->locals [loc_index]->dreg;
9909 ins->flags |= ins_flag;
9912 if (ins_flag & MONO_INST_VOLATILE) {
9913 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9914 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9920 /* Optimize the ldobj+stobj combination */
9921 /* The reference case ends up being a load+store anyway */
9922 /* Skip this if the operation is volatile. */
9923 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9928 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9935 ins = mini_emit_memory_load (cfg, &klass->byval_arg, sp [0], 0, ins_flag);
9944 CHECK_STACK_OVF (1);
9946 n = read32 (ip + 1);
9948 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9949 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9950 ins->type = STACK_OBJ;
9953 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9954 MonoInst *iargs [1];
9955 char *str = (char *)mono_method_get_wrapper_data (method, n);
9957 if (cfg->compile_aot)
9958 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9960 EMIT_NEW_PCONST (cfg, iargs [0], str);
9961 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9963 if (cfg->opt & MONO_OPT_SHARED) {
9964 MonoInst *iargs [3];
9966 if (cfg->compile_aot) {
9967 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9969 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9970 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9971 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9972 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
9973 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
9976 if (cfg->cbb->out_of_line) {
9977 MonoInst *iargs [2];
9979 if (image == mono_defaults.corlib) {
9981 * Avoid relocations in AOT and save some space by using a
9982 * version of helper_ldstr specialized to mscorlib.
9984 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9985 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9987 /* Avoid creating the string object */
9988 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9989 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9990 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9994 if (cfg->compile_aot) {
9995 NEW_LDSTRCONST (cfg, ins, image, n);
9997 MONO_ADD_INS (cfg->cbb, ins);
10000 NEW_PCONST (cfg, ins, NULL);
10001 ins->type = STACK_OBJ;
10002 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10006 OUT_OF_MEMORY_FAILURE;
10009 MONO_ADD_INS (cfg->cbb, ins);
10018 MonoInst *iargs [2];
10019 MonoMethodSignature *fsig;
10022 MonoInst *vtable_arg = NULL;
10025 token = read32 (ip + 1);
10026 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10029 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10032 mono_save_token_info (cfg, image, token, cmethod);
10034 if (!mono_class_init (cmethod->klass))
10035 TYPE_LOAD_ERROR (cmethod->klass);
10037 context_used = mini_method_check_context_used (cfg, cmethod);
10039 if (mono_security_core_clr_enabled ())
10040 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10042 if (cfg->gshared && cmethod && cmethod->klass != method->klass && mono_class_is_ginst (cmethod->klass) && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10043 emit_class_init (cfg, cmethod->klass);
10044 CHECK_TYPELOAD (cmethod->klass);
10048 if (cfg->gsharedvt) {
10049 if (mini_is_gsharedvt_variable_signature (sig))
10050 GSHAREDVT_FAILURE (*ip);
10054 n = fsig->param_count;
10058 * Generate smaller code for the common newobj <exception> instruction in
10059 * argument checking code.
10061 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10062 is_exception_class (cmethod->klass) && n <= 2 &&
10063 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10064 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10065 MonoInst *iargs [3];
10069 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10072 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10075 iargs [1] = sp [0];
10076 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10079 iargs [1] = sp [0];
10080 iargs [2] = sp [1];
10081 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10084 g_assert_not_reached ();
10092 /* move the args to allow room for 'this' in the first position */
10098 /* check_call_signature () requires sp[0] to be set */
10099 this_ins.type = STACK_OBJ;
10100 sp [0] = &this_ins;
10101 if (check_call_signature (cfg, fsig, sp))
10106 if (mini_class_is_system_array (cmethod->klass)) {
10107 *sp = emit_get_rgctx_method (cfg, context_used,
10108 cmethod, MONO_RGCTX_INFO_METHOD);
10110 /* Avoid varargs in the common case */
10111 if (fsig->param_count == 1)
10112 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10113 else if (fsig->param_count == 2)
10114 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10115 else if (fsig->param_count == 3)
10116 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10117 else if (fsig->param_count == 4)
10118 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10120 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10121 } else if (cmethod->string_ctor) {
10122 g_assert (!context_used);
10123 g_assert (!vtable_arg);
10124 /* we simply pass a null pointer */
10125 EMIT_NEW_PCONST (cfg, *sp, NULL);
10126 /* now call the string ctor */
10127 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10129 if (cmethod->klass->valuetype) {
10130 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10131 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10132 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10137 * The code generated by mini_emit_virtual_call () expects
10138 * iargs [0] to be a boxed instance, but luckily the vcall
10139 * will be transformed into a normal call there.
10141 } else if (context_used) {
10142 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10145 MonoVTable *vtable = NULL;
10147 if (!cfg->compile_aot)
10148 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10149 CHECK_TYPELOAD (cmethod->klass);
10152 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10153 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10154 * As a workaround, we call class cctors before allocating objects.
10156 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10157 emit_class_init (cfg, cmethod->klass);
10158 if (cfg->verbose_level > 2)
10159 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10160 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10163 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10166 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10169 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10171 /* Now call the actual ctor */
10172 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10173 CHECK_CFG_EXCEPTION;
10176 if (alloc == NULL) {
10178 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10179 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10187 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10188 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10191 case CEE_CASTCLASS:
10196 token = read32 (ip + 1);
10197 klass = mini_get_class (method, token, generic_context);
10198 CHECK_TYPELOAD (klass);
10199 if (sp [0]->type != STACK_OBJ)
10202 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
10203 ins->dreg = alloc_preg (cfg);
10204 ins->sreg1 = (*sp)->dreg;
10205 ins->klass = klass;
10206 ins->type = STACK_OBJ;
10207 MONO_ADD_INS (cfg->cbb, ins);
10209 CHECK_CFG_EXCEPTION;
10213 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10216 case CEE_UNBOX_ANY: {
10217 MonoInst *res, *addr;
10222 token = read32 (ip + 1);
10223 klass = mini_get_class (method, token, generic_context);
10224 CHECK_TYPELOAD (klass);
10226 mono_save_token_info (cfg, image, token, klass);
10228 context_used = mini_class_check_context_used (cfg, klass);
10230 if (mini_is_gsharedvt_klass (klass)) {
10231 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10233 } else if (generic_class_is_reference_type (cfg, klass)) {
10234 if (MONO_INS_IS_PCONST_NULL (*sp)) {
10235 EMIT_NEW_PCONST (cfg, res, NULL);
10236 res->type = STACK_OBJ;
10238 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
10239 res->dreg = alloc_preg (cfg);
10240 res->sreg1 = (*sp)->dreg;
10241 res->klass = klass;
10242 res->type = STACK_OBJ;
10243 MONO_ADD_INS (cfg->cbb, res);
10244 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
10246 } else if (mono_class_is_nullable (klass)) {
10247 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10249 addr = handle_unbox (cfg, klass, sp, context_used);
10251 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10262 MonoClass *enum_class;
10263 MonoMethod *has_flag;
10269 token = read32 (ip + 1);
10270 klass = mini_get_class (method, token, generic_context);
10271 CHECK_TYPELOAD (klass);
10273 mono_save_token_info (cfg, image, token, klass);
10275 context_used = mini_class_check_context_used (cfg, klass);
10277 if (generic_class_is_reference_type (cfg, klass)) {
10283 if (klass == mono_defaults.void_class)
10285 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10287 /* frequent check in generic code: box (struct), brtrue */
10292 * <push int/long ptr>
10295 * constrained. MyFlags
10296 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10298 * If we find this sequence and the operand types on box and constrained
10299 * are equal, we can emit a specialized instruction sequence instead of
10300 * the very slow HasFlag () call.
10302 if ((cfg->opt & MONO_OPT_INTRINS) &&
10303 /* Cheap checks first. */
10304 ip + 5 + 6 + 5 < end &&
10305 ip [5] == CEE_PREFIX1 &&
10306 ip [6] == CEE_CONSTRAINED_ &&
10307 ip [11] == CEE_CALLVIRT &&
10308 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10309 mono_class_is_enum (klass) &&
10310 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10311 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10312 has_flag->klass == mono_defaults.enum_class &&
10313 !strcmp (has_flag->name, "HasFlag") &&
10314 has_flag->signature->hasthis &&
10315 has_flag->signature->param_count == 1) {
10316 CHECK_TYPELOAD (enum_class);
10318 if (enum_class == klass) {
10319 MonoInst *enum_this, *enum_flag;
10324 enum_this = sp [0];
10325 enum_flag = sp [1];
10327 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10332 // FIXME: LLVM can't handle the inconsistent bb linking
10333 if (!mono_class_is_nullable (klass) &&
10334 !mini_is_gsharedvt_klass (klass) &&
10335 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10336 (ip [5] == CEE_BRTRUE ||
10337 ip [5] == CEE_BRTRUE_S ||
10338 ip [5] == CEE_BRFALSE ||
10339 ip [5] == CEE_BRFALSE_S)) {
10340 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10342 MonoBasicBlock *true_bb, *false_bb;
10346 if (cfg->verbose_level > 3) {
10347 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10348 printf ("<box+brtrue opt>\n");
10353 case CEE_BRFALSE_S:
10356 target = ip + 1 + (signed char)(*ip);
10363 target = ip + 4 + (gint)(read32 (ip));
10367 g_assert_not_reached ();
10371 * We need to link both bblocks, since it is needed for handling stack
10372 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10373 * Branching to only one of them would lead to inconsistencies, so
10374 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10376 GET_BBLOCK (cfg, true_bb, target);
10377 GET_BBLOCK (cfg, false_bb, ip);
10379 mono_link_bblock (cfg, cfg->cbb, true_bb);
10380 mono_link_bblock (cfg, cfg->cbb, false_bb);
10382 if (sp != stack_start) {
10383 handle_stack_args (cfg, stack_start, sp - stack_start);
10385 CHECK_UNVERIFIABLE (cfg);
10388 if (COMPILE_LLVM (cfg)) {
10389 dreg = alloc_ireg (cfg);
10390 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10391 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10393 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10395 /* The JIT can't eliminate the iconst+compare */
10396 MONO_INST_NEW (cfg, ins, OP_BR);
10397 ins->inst_target_bb = is_true ? true_bb : false_bb;
10398 MONO_ADD_INS (cfg->cbb, ins);
10401 start_new_bblock = 1;
10405 *sp++ = handle_box (cfg, val, klass, context_used);
10407 CHECK_CFG_EXCEPTION;
10416 token = read32 (ip + 1);
10417 klass = mini_get_class (method, token, generic_context);
10418 CHECK_TYPELOAD (klass);
10420 mono_save_token_info (cfg, image, token, klass);
10422 context_used = mini_class_check_context_used (cfg, klass);
10424 if (mono_class_is_nullable (klass)) {
10427 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10428 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10432 ins = handle_unbox (cfg, klass, sp, context_used);
10445 MonoClassField *field;
10446 #ifndef DISABLE_REMOTING
10450 gboolean is_instance;
10452 gpointer addr = NULL;
10453 gboolean is_special_static;
10455 MonoInst *store_val = NULL;
10456 MonoInst *thread_ins;
10459 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10461 if (op == CEE_STFLD) {
10464 store_val = sp [1];
10469 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10471 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10474 if (op == CEE_STSFLD) {
10477 store_val = sp [0];
10482 token = read32 (ip + 1);
10483 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10484 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
10485 klass = field->parent;
10488 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10491 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10492 FIELD_ACCESS_FAILURE (method, field);
10493 mono_class_init (klass);
10495 /* if the class is Critical then transparent code cannot access it's fields */
10496 if (!is_instance && mono_security_core_clr_enabled ())
10497 ensure_method_is_allowed_to_access_field (cfg, method, field);
10499 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10500 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10501 if (mono_security_core_clr_enabled ())
10502 ensure_method_is_allowed_to_access_field (cfg, method, field);
10505 ftype = mono_field_get_type (field);
10508 * LDFLD etc. is usable on static fields as well, so convert those cases to
10511 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
10523 g_assert_not_reached ();
10525 is_instance = FALSE;
10528 context_used = mini_class_check_context_used (cfg, klass);
10530 /* INSTANCE CASE */
10532 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10533 if (op == CEE_STFLD) {
10534 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10536 #ifndef DISABLE_REMOTING
10537 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10538 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10539 MonoInst *iargs [5];
10541 GSHAREDVT_FAILURE (op);
10543 iargs [0] = sp [0];
10544 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10545 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10546 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10548 iargs [4] = sp [1];
10550 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10551 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10552 iargs, ip, cfg->real_offset, TRUE);
10553 CHECK_CFG_EXCEPTION;
10554 g_assert (costs > 0);
10556 cfg->real_offset += 5;
10558 inline_costs += costs;
10560 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10565 MonoInst *store, *wbarrier_ptr_ins = NULL;
10567 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10569 if (ins_flag & MONO_INST_VOLATILE) {
10570 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10571 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10574 if (mini_is_gsharedvt_klass (klass)) {
10575 MonoInst *offset_ins;
10577 context_used = mini_class_check_context_used (cfg, klass);
10579 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10580 /* The value is offset by 1 */
10581 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10582 dreg = alloc_ireg_mp (cfg);
10583 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10584 wbarrier_ptr_ins = ins;
10585 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10586 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10588 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10590 if (sp [0]->opcode != OP_LDADDR)
10591 store->flags |= MONO_INST_FAULT;
10593 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !MONO_INS_IS_PCONST_NULL (sp [1])) {
10594 if (mini_is_gsharedvt_klass (klass)) {
10595 g_assert (wbarrier_ptr_ins);
10596 mini_emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
10598 /* insert call to write barrier */
10602 dreg = alloc_ireg_mp (cfg);
10603 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10604 mini_emit_write_barrier (cfg, ptr, sp [1]);
10608 store->flags |= ins_flag;
10615 #ifndef DISABLE_REMOTING
10616 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10617 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10618 MonoInst *iargs [4];
10620 GSHAREDVT_FAILURE (op);
10622 iargs [0] = sp [0];
10623 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10624 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10625 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10626 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10627 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10628 iargs, ip, cfg->real_offset, TRUE);
10629 CHECK_CFG_EXCEPTION;
10630 g_assert (costs > 0);
10632 cfg->real_offset += 5;
10636 inline_costs += costs;
10638 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10644 if (sp [0]->type == STACK_VTYPE) {
10647 /* Have to compute the address of the variable */
10649 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10651 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10653 g_assert (var->klass == klass);
10655 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10659 if (op == CEE_LDFLDA) {
10660 if (sp [0]->type == STACK_OBJ) {
10661 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10662 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10665 dreg = alloc_ireg_mp (cfg);
10667 if (mini_is_gsharedvt_klass (klass)) {
10668 MonoInst *offset_ins;
10670 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10671 /* The value is offset by 1 */
10672 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10673 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10675 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10677 ins->klass = mono_class_from_mono_type (field->type);
10678 ins->type = STACK_MP;
10683 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10685 if (sp [0]->opcode == OP_LDADDR && klass->simd_type && cfg->opt & MONO_OPT_SIMD) {
10686 ins = mono_emit_simd_field_load (cfg, field, sp [0]);
10695 MonoInst *field_add_inst = sp [0];
10696 if (mini_is_gsharedvt_klass (klass)) {
10697 MonoInst *offset_ins;
10699 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10700 /* The value is offset by 1 */
10701 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10702 EMIT_NEW_BIALU (cfg, field_add_inst, OP_PADD, alloc_ireg_mp (cfg), sp [0]->dreg, offset_ins->dreg);
10706 load = mini_emit_memory_load (cfg, field->type, field_add_inst, foffset, ins_flag);
10708 if (sp [0]->opcode != OP_LDADDR)
10709 load->flags |= MONO_INST_FAULT;
10721 context_used = mini_class_check_context_used (cfg, klass);
10723 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
10724 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
10728 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10729 * to be called here.
10731 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10732 mono_class_vtable (cfg->domain, klass);
10733 CHECK_TYPELOAD (klass);
10735 mono_domain_lock (cfg->domain);
10736 if (cfg->domain->special_static_fields)
10737 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10738 mono_domain_unlock (cfg->domain);
10740 is_special_static = mono_class_field_is_special_static (field);
10742 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10743 thread_ins = mono_create_tls_get (cfg, TLS_KEY_THREAD);
10747 /* Generate IR to compute the field address */
10748 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10750 * Fast access to TLS data
10751 * Inline version of get_thread_static_data () in
10755 int idx, static_data_reg, array_reg, dreg;
10757 if (context_used && cfg->gsharedvt && mini_is_gsharedvt_klass (klass))
10758 GSHAREDVT_FAILURE (op);
10760 static_data_reg = alloc_ireg (cfg);
10761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10763 if (cfg->compile_aot) {
10764 int offset_reg, offset2_reg, idx_reg;
10766 /* For TLS variables, this will return the TLS offset */
10767 EMIT_NEW_SFLDACONST (cfg, ins, field);
10768 offset_reg = ins->dreg;
10769 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10770 idx_reg = alloc_ireg (cfg);
10771 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
10772 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10773 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10774 array_reg = alloc_ireg (cfg);
10775 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10776 offset2_reg = alloc_ireg (cfg);
10777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
10778 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
10779 dreg = alloc_ireg (cfg);
10780 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10782 offset = (gsize)addr & 0x7fffffff;
10783 idx = offset & 0x3f;
10785 array_reg = alloc_ireg (cfg);
10786 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10787 dreg = alloc_ireg (cfg);
10788 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
10790 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10791 (cfg->compile_aot && is_special_static) ||
10792 (context_used && is_special_static)) {
10793 MonoInst *iargs [2];
10795 g_assert (field->parent);
10796 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10797 if (context_used) {
10798 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10799 field, MONO_RGCTX_INFO_CLASS_FIELD);
10801 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10803 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10804 } else if (context_used) {
10805 MonoInst *static_data;
10808 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10809 method->klass->name_space, method->klass->name, method->name,
10810 depth, field->offset);
10813 if (mono_class_needs_cctor_run (klass, method))
10814 emit_class_init (cfg, klass);
10817 * The pointer we're computing here is
10819 * super_info.static_data + field->offset
10821 static_data = mini_emit_get_rgctx_klass (cfg, context_used,
10822 klass, MONO_RGCTX_INFO_STATIC_DATA);
10824 if (mini_is_gsharedvt_klass (klass)) {
10825 MonoInst *offset_ins;
10827 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10828 /* The value is offset by 1 */
10829 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
10830 dreg = alloc_ireg_mp (cfg);
10831 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10832 } else if (field->offset == 0) {
10835 int addr_reg = mono_alloc_preg (cfg);
10836 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10838 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10839 MonoInst *iargs [2];
10841 g_assert (field->parent);
10842 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10843 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10844 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10846 MonoVTable *vtable = NULL;
10848 if (!cfg->compile_aot)
10849 vtable = mono_class_vtable (cfg->domain, klass);
10850 CHECK_TYPELOAD (klass);
10853 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10854 if (!(g_slist_find (class_inits, klass))) {
10855 emit_class_init (cfg, klass);
10856 if (cfg->verbose_level > 2)
10857 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10858 class_inits = g_slist_prepend (class_inits, klass);
10861 if (cfg->run_cctors) {
10862 /* This makes so that inline cannot trigger */
10863 /* .cctors: too many apps depend on them */
10864 /* running with a specific order... */
10866 if (! vtable->initialized)
10867 INLINE_FAILURE ("class init");
10868 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
10869 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
10870 goto exception_exit;
10874 if (cfg->compile_aot)
10875 EMIT_NEW_SFLDACONST (cfg, ins, field);
10878 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10880 EMIT_NEW_PCONST (cfg, ins, addr);
10883 MonoInst *iargs [1];
10884 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10885 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10889 /* Generate IR to do the actual load/store operation */
10891 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10892 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10893 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10896 if (op == CEE_LDSFLDA) {
10897 ins->klass = mono_class_from_mono_type (ftype);
10898 ins->type = STACK_PTR;
10900 } else if (op == CEE_STSFLD) {
10903 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10904 store->flags |= ins_flag;
10906 gboolean is_const = FALSE;
10907 MonoVTable *vtable = NULL;
10908 gpointer addr = NULL;
10910 if (!context_used) {
10911 vtable = mono_class_vtable (cfg->domain, klass);
10912 CHECK_TYPELOAD (klass);
10914 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10915 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10916 int ro_type = ftype->type;
10918 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10919 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10920 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10923 GSHAREDVT_FAILURE (op);
10925 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10928 case MONO_TYPE_BOOLEAN:
10930 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10934 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10937 case MONO_TYPE_CHAR:
10939 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10943 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10948 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10952 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10957 case MONO_TYPE_PTR:
10958 case MONO_TYPE_FNPTR:
10959 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10960 type_to_eval_stack_type ((cfg), field->type, *sp);
10963 case MONO_TYPE_STRING:
10964 case MONO_TYPE_OBJECT:
10965 case MONO_TYPE_CLASS:
10966 case MONO_TYPE_SZARRAY:
10967 case MONO_TYPE_ARRAY:
10968 if (!mono_gc_is_moving ()) {
10969 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10970 type_to_eval_stack_type ((cfg), field->type, *sp);
10978 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10983 case MONO_TYPE_VALUETYPE:
10993 CHECK_STACK_OVF (1);
10995 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10996 load->flags |= ins_flag;
11002 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11003 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11004 mini_emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11015 token = read32 (ip + 1);
11016 klass = mini_get_class (method, token, generic_context);
11017 CHECK_TYPELOAD (klass);
11019 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11020 mini_emit_memory_store (cfg, &klass->byval_arg, sp [0], sp [1], ins_flag);
11031 const char *data_ptr;
11033 guint32 field_token;
11039 token = read32 (ip + 1);
11041 klass = mini_get_class (method, token, generic_context);
11042 CHECK_TYPELOAD (klass);
11043 if (klass->byval_arg.type == MONO_TYPE_VOID)
11046 context_used = mini_class_check_context_used (cfg, klass);
11048 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11049 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11050 ins->sreg1 = sp [0]->dreg;
11051 ins->type = STACK_I4;
11052 ins->dreg = alloc_ireg (cfg);
11053 MONO_ADD_INS (cfg->cbb, ins);
11054 *sp = mono_decompose_opcode (cfg, ins);
11057 if (context_used) {
11058 MonoInst *args [3];
11059 MonoClass *array_class = mono_array_class_get (klass, 1);
11060 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11062 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11065 args [0] = mini_emit_get_rgctx_klass (cfg, context_used,
11066 array_class, MONO_RGCTX_INFO_VTABLE);
11071 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11073 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11075 if (cfg->opt & MONO_OPT_SHARED) {
11076 /* Decompose now to avoid problems with references to the domainvar */
11077 MonoInst *iargs [3];
11079 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11080 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11081 iargs [2] = sp [0];
11083 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11085 /* Decompose later since it is needed by abcrem */
11086 MonoClass *array_type = mono_array_class_get (klass, 1);
11087 mono_class_vtable (cfg->domain, array_type);
11088 CHECK_TYPELOAD (array_type);
11090 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11091 ins->dreg = alloc_ireg_ref (cfg);
11092 ins->sreg1 = sp [0]->dreg;
11093 ins->inst_newa_class = klass;
11094 ins->type = STACK_OBJ;
11095 ins->klass = array_type;
11096 MONO_ADD_INS (cfg->cbb, ins);
11097 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11098 cfg->cbb->has_array_access = TRUE;
11100 /* Needed so mono_emit_load_get_addr () gets called */
11101 mono_get_got_var (cfg);
11111 * we inline/optimize the initialization sequence if possible.
11112 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11113 * for small sizes open code the memcpy
11114 * ensure the rva field is big enough
11116 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11117 MonoMethod *memcpy_method = mini_get_memcpy_method ();
11118 MonoInst *iargs [3];
11119 int add_reg = alloc_ireg_mp (cfg);
11121 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11122 if (cfg->compile_aot) {
11123 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11125 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11127 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11128 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11137 if (sp [0]->type != STACK_OBJ)
11140 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11141 ins->dreg = alloc_preg (cfg);
11142 ins->sreg1 = sp [0]->dreg;
11143 ins->type = STACK_I4;
11144 /* This flag will be inherited by the decomposition */
11145 ins->flags |= MONO_INST_FAULT;
11146 MONO_ADD_INS (cfg->cbb, ins);
11147 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11148 cfg->cbb->has_array_access = TRUE;
11156 if (sp [0]->type != STACK_OBJ)
11159 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11161 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11162 CHECK_TYPELOAD (klass);
11163 /* we need to make sure that this array is exactly the type it needs
11164 * to be for correctness. the wrappers are lax with their usage
11165 * so we need to ignore them here
11167 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11168 MonoClass *array_class = mono_array_class_get (klass, 1);
11169 mini_emit_check_array_type (cfg, sp [0], array_class);
11170 CHECK_TYPELOAD (array_class);
11174 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11179 case CEE_LDELEM_I1:
11180 case CEE_LDELEM_U1:
11181 case CEE_LDELEM_I2:
11182 case CEE_LDELEM_U2:
11183 case CEE_LDELEM_I4:
11184 case CEE_LDELEM_U4:
11185 case CEE_LDELEM_I8:
11187 case CEE_LDELEM_R4:
11188 case CEE_LDELEM_R8:
11189 case CEE_LDELEM_REF: {
11195 if (*ip == CEE_LDELEM) {
11197 token = read32 (ip + 1);
11198 klass = mini_get_class (method, token, generic_context);
11199 CHECK_TYPELOAD (klass);
11200 mono_class_init (klass);
11203 klass = array_access_to_klass (*ip);
11205 if (sp [0]->type != STACK_OBJ)
11208 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11210 if (mini_is_gsharedvt_variable_klass (klass)) {
11211 // FIXME-VT: OP_ICONST optimization
11212 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11213 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11214 ins->opcode = OP_LOADV_MEMBASE;
11215 } else if (sp [1]->opcode == OP_ICONST) {
11216 int array_reg = sp [0]->dreg;
11217 int index_reg = sp [1]->dreg;
11218 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11220 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
11221 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
11223 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11224 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11226 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11227 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11230 if (*ip == CEE_LDELEM)
11237 case CEE_STELEM_I1:
11238 case CEE_STELEM_I2:
11239 case CEE_STELEM_I4:
11240 case CEE_STELEM_I8:
11241 case CEE_STELEM_R4:
11242 case CEE_STELEM_R8:
11243 case CEE_STELEM_REF:
11248 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11250 if (*ip == CEE_STELEM) {
11252 token = read32 (ip + 1);
11253 klass = mini_get_class (method, token, generic_context);
11254 CHECK_TYPELOAD (klass);
11255 mono_class_init (klass);
11258 klass = array_access_to_klass (*ip);
11260 if (sp [0]->type != STACK_OBJ)
11263 emit_array_store (cfg, klass, sp, TRUE);
11265 if (*ip == CEE_STELEM)
11272 case CEE_CKFINITE: {
11276 if (cfg->llvm_only) {
11277 MonoInst *iargs [1];
11279 iargs [0] = sp [0];
11280 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11282 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11283 ins->sreg1 = sp [0]->dreg;
11284 ins->dreg = alloc_freg (cfg);
11285 ins->type = STACK_R8;
11286 MONO_ADD_INS (cfg->cbb, ins);
11288 *sp++ = mono_decompose_opcode (cfg, ins);
11294 case CEE_REFANYVAL: {
11295 MonoInst *src_var, *src;
11297 int klass_reg = alloc_preg (cfg);
11298 int dreg = alloc_preg (cfg);
11300 GSHAREDVT_FAILURE (*ip);
11303 MONO_INST_NEW (cfg, ins, *ip);
11306 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11307 CHECK_TYPELOAD (klass);
11309 context_used = mini_class_check_context_used (cfg, klass);
11312 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11314 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11315 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11316 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11318 if (context_used) {
11319 MonoInst *klass_ins;
11321 klass_ins = mini_emit_get_rgctx_klass (cfg, context_used,
11322 klass, MONO_RGCTX_INFO_KLASS);
11325 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11326 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11328 mini_emit_class_check (cfg, klass_reg, klass);
11330 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11331 ins->type = STACK_MP;
11332 ins->klass = klass;
11337 case CEE_MKREFANY: {
11338 MonoInst *loc, *addr;
11340 GSHAREDVT_FAILURE (*ip);
11343 MONO_INST_NEW (cfg, ins, *ip);
11346 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11347 CHECK_TYPELOAD (klass);
11349 context_used = mini_class_check_context_used (cfg, klass);
11351 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11352 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11354 if (context_used) {
11355 MonoInst *const_ins;
11356 int type_reg = alloc_preg (cfg);
11358 const_ins = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11359 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11360 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11361 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11363 int const_reg = alloc_preg (cfg);
11364 int type_reg = alloc_preg (cfg);
11366 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11367 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11368 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11369 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11371 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11373 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11374 ins->type = STACK_VTYPE;
11375 ins->klass = mono_defaults.typed_reference_class;
11380 case CEE_LDTOKEN: {
11382 MonoClass *handle_class;
11384 CHECK_STACK_OVF (1);
11387 n = read32 (ip + 1);
11389 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11390 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11391 handle = mono_method_get_wrapper_data (method, n);
11392 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
11393 if (handle_class == mono_defaults.typehandle_class)
11394 handle = &((MonoClass*)handle)->byval_arg;
11397 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11402 mono_class_init (handle_class);
11403 if (cfg->gshared) {
11404 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11405 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11406 /* This case handles ldtoken
11407 of an open type, like for
11410 } else if (handle_class == mono_defaults.typehandle_class) {
11411 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
11412 } else if (handle_class == mono_defaults.fieldhandle_class)
11413 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11414 else if (handle_class == mono_defaults.methodhandle_class)
11415 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
11417 g_assert_not_reached ();
11420 if ((cfg->opt & MONO_OPT_SHARED) &&
11421 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11422 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11423 MonoInst *addr, *vtvar, *iargs [3];
11424 int method_context_used;
11426 method_context_used = mini_method_check_context_used (cfg, method);
11428 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11430 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11431 EMIT_NEW_ICONST (cfg, iargs [1], n);
11432 if (method_context_used) {
11433 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11434 method, MONO_RGCTX_INFO_METHOD);
11435 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11437 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11438 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11440 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11442 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11444 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11446 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11447 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11448 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11449 (cmethod->klass == mono_defaults.systemtype_class) &&
11450 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11451 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
11453 mono_class_init (tclass);
11454 if (context_used) {
11455 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11456 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11457 } else if (cfg->compile_aot) {
11458 if (method->wrapper_type) {
11459 error_init (&error); //got to do it since there are multiple conditionals below
11460 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11461 /* Special case for static synchronized wrappers */
11462 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11464 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11465 /* FIXME: n is not a normal token */
11467 EMIT_NEW_PCONST (cfg, ins, NULL);
11470 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11473 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
11475 EMIT_NEW_PCONST (cfg, ins, rt);
11477 ins->type = STACK_OBJ;
11478 ins->klass = cmethod->klass;
11481 MonoInst *addr, *vtvar;
11483 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11485 if (context_used) {
11486 if (handle_class == mono_defaults.typehandle_class) {
11487 ins = mini_emit_get_rgctx_klass (cfg, context_used,
11488 mono_class_from_mono_type ((MonoType *)handle),
11489 MONO_RGCTX_INFO_TYPE);
11490 } else if (handle_class == mono_defaults.methodhandle_class) {
11491 ins = emit_get_rgctx_method (cfg, context_used,
11492 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
11493 } else if (handle_class == mono_defaults.fieldhandle_class) {
11494 ins = emit_get_rgctx_field (cfg, context_used,
11495 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
11497 g_assert_not_reached ();
11499 } else if (cfg->compile_aot) {
11500 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11502 EMIT_NEW_PCONST (cfg, ins, handle);
11504 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11505 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11506 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11516 if (sp [-1]->type != STACK_OBJ)
11519 MONO_INST_NEW (cfg, ins, OP_THROW);
11521 ins->sreg1 = sp [0]->dreg;
11523 cfg->cbb->out_of_line = TRUE;
11524 MONO_ADD_INS (cfg->cbb, ins);
11525 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11526 MONO_ADD_INS (cfg->cbb, ins);
11529 link_bblock (cfg, cfg->cbb, end_bblock);
11530 start_new_bblock = 1;
11531 /* This can complicate code generation for llvm since the return value might not be defined */
11532 if (COMPILE_LLVM (cfg))
11533 INLINE_FAILURE ("throw");
11535 case CEE_ENDFINALLY:
11536 if (!ip_in_finally_clause (cfg, ip - header->code))
11538 /* mono_save_seq_point_info () depends on this */
11539 if (sp != stack_start)
11540 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11541 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11542 MONO_ADD_INS (cfg->cbb, ins);
11544 start_new_bblock = 1;
11547 * Control will leave the method so empty the stack, otherwise
11548 * the next basic block will start with a nonempty stack.
11550 while (sp != stack_start) {
11555 case CEE_LEAVE_S: {
11558 if (*ip == CEE_LEAVE) {
11560 target = ip + 5 + (gint32)read32(ip + 1);
11563 target = ip + 2 + (signed char)(ip [1]);
11566 /* empty the stack */
11567 while (sp != stack_start) {
11572 * If this leave statement is in a catch block, check for a
11573 * pending exception, and rethrow it if necessary.
11574 * We avoid doing this in runtime invoke wrappers, since those are called
11575 * by native code which expects the wrapper to catch all exceptions.
11577 for (i = 0; i < header->num_clauses; ++i) {
11578 MonoExceptionClause *clause = &header->clauses [i];
11581 * Use <= in the final comparison to handle clauses with multiple
11582 * leave statements, like in bug #78024.
11583 * The ordering of the exception clauses guarantees that we find the
11584 * innermost clause.
11586 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11588 MonoBasicBlock *dont_throw;
11593 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11596 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11598 NEW_BBLOCK (cfg, dont_throw);
11601 * Currently, we always rethrow the abort exception, despite the
11602 * fact that this is not correct. See thread6.cs for an example.
11603 * But propagating the abort exception is more important than
11604 * getting the semantics right.
11606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11607 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11608 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11610 MONO_START_BB (cfg, dont_throw);
11615 cfg->cbb->try_end = (intptr_t)(ip - header->code);
11618 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11620 MonoExceptionClause *clause;
11622 for (tmp = handlers; tmp; tmp = tmp->next) {
11623 clause = (MonoExceptionClause *)tmp->data;
11624 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11626 link_bblock (cfg, cfg->cbb, tblock);
11627 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11628 ins->inst_target_bb = tblock;
11629 ins->inst_eh_block = clause;
11630 MONO_ADD_INS (cfg->cbb, ins);
11631 cfg->cbb->has_call_handler = 1;
11632 if (COMPILE_LLVM (cfg)) {
11633 MonoBasicBlock *target_bb;
11636 * Link the finally bblock with the target, since it will
11637 * conceptually branch there.
11639 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
11640 GET_BBLOCK (cfg, target_bb, target);
11641 link_bblock (cfg, tblock, target_bb);
11644 g_list_free (handlers);
11647 MONO_INST_NEW (cfg, ins, OP_BR);
11648 MONO_ADD_INS (cfg->cbb, ins);
11649 GET_BBLOCK (cfg, tblock, target);
11650 link_bblock (cfg, cfg->cbb, tblock);
11651 ins->inst_target_bb = tblock;
11653 start_new_bblock = 1;
11655 if (*ip == CEE_LEAVE)
11664 * Mono specific opcodes
11666 case MONO_CUSTOM_PREFIX: {
11668 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11672 case CEE_MONO_ICALL: {
11674 MonoJitICallInfo *info;
11676 token = read32 (ip + 2);
11677 func = mono_method_get_wrapper_data (method, token);
11678 info = mono_find_jit_icall_by_addr (func);
11680 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11683 CHECK_STACK (info->sig->param_count);
11684 sp -= info->sig->param_count;
11686 ins = mono_emit_jit_icall (cfg, info->func, sp);
11687 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11691 inline_costs += 10 * num_calls++;
11695 case CEE_MONO_LDPTR_CARD_TABLE:
11696 case CEE_MONO_LDPTR_NURSERY_START:
11697 case CEE_MONO_LDPTR_NURSERY_BITS:
11698 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
11699 CHECK_STACK_OVF (1);
11702 case CEE_MONO_LDPTR_CARD_TABLE:
11703 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11705 case CEE_MONO_LDPTR_NURSERY_START:
11706 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
11708 case CEE_MONO_LDPTR_NURSERY_BITS:
11709 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
11711 case CEE_MONO_LDPTR_INT_REQ_FLAG:
11712 ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11718 inline_costs += 10 * num_calls++;
11721 case CEE_MONO_LDPTR: {
11724 CHECK_STACK_OVF (1);
11726 token = read32 (ip + 2);
11728 ptr = mono_method_get_wrapper_data (method, token);
11729 EMIT_NEW_PCONST (cfg, ins, ptr);
11732 inline_costs += 10 * num_calls++;
11733 /* Can't embed random pointers into AOT code */
11737 case CEE_MONO_JIT_ICALL_ADDR: {
11738 MonoJitICallInfo *callinfo;
11741 CHECK_STACK_OVF (1);
11743 token = read32 (ip + 2);
11745 ptr = mono_method_get_wrapper_data (method, token);
11746 callinfo = mono_find_jit_icall_by_addr (ptr);
11747 g_assert (callinfo);
11748 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11751 inline_costs += 10 * num_calls++;
11754 case CEE_MONO_ICALL_ADDR: {
11755 MonoMethod *cmethod;
11758 CHECK_STACK_OVF (1);
11760 token = read32 (ip + 2);
11762 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
11764 if (cfg->compile_aot) {
11765 if (cfg->direct_pinvoke && ip + 6 < end && (ip [6] == CEE_POP)) {
11767 * This is generated by emit_native_wrapper () to resolve the pinvoke address
11768 * before the call, it's not needed when using direct pinvoke.
11769 * This is not an optimization, but it's used to avoid looking up pinvokes
11770 * on platforms which don't support dlopen ().
11772 EMIT_NEW_PCONST (cfg, ins, NULL);
11774 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11777 ptr = mono_lookup_internal_call (cmethod);
11779 EMIT_NEW_PCONST (cfg, ins, ptr);
11785 case CEE_MONO_VTADDR: {
11786 MonoInst *src_var, *src;
11792 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11793 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11798 case CEE_MONO_NEWOBJ: {
11799 MonoInst *iargs [2];
11801 CHECK_STACK_OVF (1);
11803 token = read32 (ip + 2);
11804 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11805 mono_class_init (klass);
11806 NEW_DOMAINCONST (cfg, iargs [0]);
11807 MONO_ADD_INS (cfg->cbb, iargs [0]);
11808 NEW_CLASSCONST (cfg, iargs [1], klass);
11809 MONO_ADD_INS (cfg->cbb, iargs [1]);
11810 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
11812 inline_costs += 10 * num_calls++;
11815 case CEE_MONO_OBJADDR:
11818 MONO_INST_NEW (cfg, ins, OP_MOVE);
11819 ins->dreg = alloc_ireg_mp (cfg);
11820 ins->sreg1 = sp [0]->dreg;
11821 ins->type = STACK_MP;
11822 MONO_ADD_INS (cfg->cbb, ins);
11826 case CEE_MONO_LDNATIVEOBJ:
11828 * Similar to LDOBJ, but instead load the unmanaged
11829 * representation of the vtype to the stack.
11834 token = read32 (ip + 2);
11835 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11836 g_assert (klass->valuetype);
11837 mono_class_init (klass);
11840 MonoInst *src, *dest, *temp;
11843 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11844 temp->backend.is_pinvoke = 1;
11845 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11846 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11848 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11849 dest->type = STACK_VTYPE;
11850 dest->klass = klass;
11856 case CEE_MONO_RETOBJ: {
11858 * Same as RET, but return the native representation of a vtype
11861 g_assert (cfg->ret);
11862 g_assert (mono_method_signature (method)->pinvoke);
11867 token = read32 (ip + 2);
11868 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11870 if (!cfg->vret_addr) {
11871 g_assert (cfg->ret_var_is_local);
11873 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11875 EMIT_NEW_RETLOADA (cfg, ins);
11877 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11879 if (sp != stack_start)
11882 MONO_INST_NEW (cfg, ins, OP_BR);
11883 ins->inst_target_bb = end_bblock;
11884 MONO_ADD_INS (cfg->cbb, ins);
11885 link_bblock (cfg, cfg->cbb, end_bblock);
11886 start_new_bblock = 1;
11890 case CEE_MONO_SAVE_LMF:
11891 case CEE_MONO_RESTORE_LMF:
11894 case CEE_MONO_CLASSCONST:
11895 CHECK_STACK_OVF (1);
11897 token = read32 (ip + 2);
11898 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11901 inline_costs += 10 * num_calls++;
11903 case CEE_MONO_NOT_TAKEN:
11904 cfg->cbb->out_of_line = TRUE;
11907 case CEE_MONO_TLS: {
11910 CHECK_STACK_OVF (1);
11912 key = (MonoTlsKey)read32 (ip + 2);
11913 g_assert (key < TLS_KEY_NUM);
11915 ins = mono_create_tls_get (cfg, key);
11917 ins->type = STACK_PTR;
11922 case CEE_MONO_DYN_CALL: {
11923 MonoCallInst *call;
11925 /* It would be easier to call a trampoline, but that would put an
11926 * extra frame on the stack, confusing exception handling. So
11927 * implement it inline using an opcode for now.
11930 if (!cfg->dyn_call_var) {
11931 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11932 /* prevent it from being register allocated */
11933 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11936 /* Has to use a call inst since the local regalloc expects it */
11937 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11938 ins = (MonoInst*)call;
11940 ins->sreg1 = sp [0]->dreg;
11941 ins->sreg2 = sp [1]->dreg;
11942 MONO_ADD_INS (cfg->cbb, ins);
11944 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
11947 inline_costs += 10 * num_calls++;
11951 case CEE_MONO_MEMORY_BARRIER: {
11953 mini_emit_memory_barrier (cfg, (int)read32 (ip + 2));
11957 case CEE_MONO_ATOMIC_STORE_I4: {
11958 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
11964 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
11965 ins->dreg = sp [0]->dreg;
11966 ins->sreg1 = sp [1]->dreg;
11967 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
11968 MONO_ADD_INS (cfg->cbb, ins);
11973 case CEE_MONO_JIT_ATTACH: {
11974 MonoInst *args [16], *domain_ins;
11975 MonoInst *ad_ins, *jit_tls_ins;
11976 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11978 g_assert (!mono_threads_is_coop_enabled ());
11980 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11982 EMIT_NEW_PCONST (cfg, ins, NULL);
11983 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11985 ad_ins = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
11986 jit_tls_ins = mono_create_tls_get (cfg, TLS_KEY_JIT_TLS);
11988 if (ad_ins && jit_tls_ins) {
11989 NEW_BBLOCK (cfg, next_bb);
11990 NEW_BBLOCK (cfg, call_bb);
11992 if (cfg->compile_aot) {
11993 /* AOT code is only used in the root domain */
11994 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11996 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11998 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11999 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12002 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12004 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12005 MONO_START_BB (cfg, call_bb);
12008 /* AOT code is only used in the root domain */
12009 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12010 if (cfg->compile_aot) {
12014 * This is called on unattached threads, so it cannot go through the trampoline
12015 * infrastructure. Use an indirect call through a got slot initialized at load time
12018 EMIT_NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_JIT_THREAD_ATTACH, NULL);
12019 ins = mini_emit_calli (cfg, helper_sig_jit_thread_attach, args, addr, NULL, NULL);
12021 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12023 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12026 MONO_START_BB (cfg, next_bb);
12031 case CEE_MONO_JIT_DETACH: {
12032 MonoInst *args [16];
12034 /* Restore the original domain */
12035 dreg = alloc_ireg (cfg);
12036 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12037 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12041 case CEE_MONO_CALLI_EXTRA_ARG: {
12043 MonoMethodSignature *fsig;
12047 * This is the same as CEE_CALLI, but passes an additional argument
12048 * to the called method in llvmonly mode.
12049 * This is only used by delegate invoke wrappers to call the
12050 * actual delegate method.
12052 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12055 token = read32 (ip + 2);
12063 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12066 if (cfg->llvm_only)
12067 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12069 n = fsig->param_count + fsig->hasthis + 1;
12076 if (cfg->llvm_only) {
12078 * The lowest bit of 'arg' determines whether the callee uses the gsharedvt
12079 * cconv. This is set by mono_init_delegate ().
12081 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12082 MonoInst *callee = addr;
12083 MonoInst *call, *localloc_ins;
12084 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12085 int low_bit_reg = alloc_preg (cfg);
12087 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12088 NEW_BBLOCK (cfg, end_bb);
12090 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12091 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12092 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12094 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12095 addr = emit_get_rgctx_sig (cfg, context_used,
12096 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12098 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12100 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12101 ins->dreg = alloc_preg (cfg);
12102 ins->inst_imm = 2 * SIZEOF_VOID_P;
12103 MONO_ADD_INS (cfg->cbb, ins);
12104 localloc_ins = ins;
12105 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12106 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12107 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12109 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12110 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12112 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12113 MONO_START_BB (cfg, is_gsharedvt_bb);
12114 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12115 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12116 ins->dreg = call->dreg;
12118 MONO_START_BB (cfg, end_bb);
12120 /* Caller uses a normal calling conv */
12122 MonoInst *callee = addr;
12123 MonoInst *call, *localloc_ins;
12124 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12125 int low_bit_reg = alloc_preg (cfg);
12127 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12128 NEW_BBLOCK (cfg, end_bb);
12130 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12131 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12132 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12134 /* Normal case: callee uses a normal cconv, no conversion is needed */
12135 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12136 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12137 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12138 MONO_START_BB (cfg, is_gsharedvt_bb);
12139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12140 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12141 MONO_ADD_INS (cfg->cbb, addr);
12143 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12145 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12146 ins->dreg = alloc_preg (cfg);
12147 ins->inst_imm = 2 * SIZEOF_VOID_P;
12148 MONO_ADD_INS (cfg->cbb, ins);
12149 localloc_ins = ins;
12150 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12151 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12152 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12154 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12155 ins->dreg = call->dreg;
12156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12158 MONO_START_BB (cfg, end_bb);
12161 /* Same as CEE_CALLI */
12162 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12164 * We pass the address to the gsharedvt trampoline in the rgctx reg
12166 MonoInst *callee = addr;
12168 addr = emit_get_rgctx_sig (cfg, context_used,
12169 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12170 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12172 ins = (MonoInst*)mini_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12176 if (!MONO_TYPE_IS_VOID (fsig->ret))
12177 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12179 CHECK_CFG_EXCEPTION;
12183 constrained_class = NULL;
12186 case CEE_MONO_LDDOMAIN:
12187 CHECK_STACK_OVF (1);
12188 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12192 case CEE_MONO_GET_LAST_ERROR:
12194 CHECK_STACK_OVF (1);
12196 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
12197 ins->dreg = alloc_dreg (cfg, STACK_I4);
12198 ins->type = STACK_I4;
12199 MONO_ADD_INS (cfg->cbb, ins);
12204 case CEE_MONO_GET_RGCTX_ARG:
12206 CHECK_STACK_OVF (1);
12208 mono_create_rgctx_var (cfg);
12210 MONO_INST_NEW (cfg, ins, OP_MOVE);
12211 ins->dreg = alloc_dreg (cfg, STACK_PTR);
12212 ins->sreg1 = cfg->rgctx_var->dreg;
12213 ins->type = STACK_PTR;
12214 MONO_ADD_INS (cfg->cbb, ins);
12220 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12226 case CEE_PREFIX1: {
12229 case CEE_ARGLIST: {
12230 /* somewhat similar to LDTOKEN */
12231 MonoInst *addr, *vtvar;
12232 CHECK_STACK_OVF (1);
12233 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12235 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12236 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12238 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12239 ins->type = STACK_VTYPE;
12240 ins->klass = mono_defaults.argumenthandle_class;
12250 MonoInst *cmp, *arg1, *arg2;
12258 * The following transforms:
12259 * CEE_CEQ into OP_CEQ
12260 * CEE_CGT into OP_CGT
12261 * CEE_CGT_UN into OP_CGT_UN
12262 * CEE_CLT into OP_CLT
12263 * CEE_CLT_UN into OP_CLT_UN
12265 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12267 MONO_INST_NEW (cfg, ins, cmp->opcode);
12268 cmp->sreg1 = arg1->dreg;
12269 cmp->sreg2 = arg2->dreg;
12270 type_from_op (cfg, cmp, arg1, arg2);
12272 add_widen_op (cfg, cmp, &arg1, &arg2);
12273 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12274 cmp->opcode = OP_LCOMPARE;
12275 else if (arg1->type == STACK_R4)
12276 cmp->opcode = OP_RCOMPARE;
12277 else if (arg1->type == STACK_R8)
12278 cmp->opcode = OP_FCOMPARE;
12280 cmp->opcode = OP_ICOMPARE;
12281 MONO_ADD_INS (cfg->cbb, cmp);
12282 ins->type = STACK_I4;
12283 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
12284 type_from_op (cfg, ins, arg1, arg2);
12286 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12288 * The backends expect the fceq opcodes to do the
12291 ins->sreg1 = cmp->sreg1;
12292 ins->sreg2 = cmp->sreg2;
12295 MONO_ADD_INS (cfg->cbb, ins);
12301 MonoInst *argconst;
12302 MonoMethod *cil_method;
12304 CHECK_STACK_OVF (1);
12306 n = read32 (ip + 2);
12307 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12310 mono_class_init (cmethod->klass);
12312 mono_save_token_info (cfg, image, n, cmethod);
12314 context_used = mini_method_check_context_used (cfg, cmethod);
12316 cil_method = cmethod;
12317 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12318 emit_method_access_failure (cfg, method, cil_method);
12320 if (mono_security_core_clr_enabled ())
12321 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12324 * Optimize the common case of ldftn+delegate creation
12326 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12327 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12328 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12329 MonoInst *target_ins, *handle_ins;
12330 MonoMethod *invoke;
12331 int invoke_context_used;
12333 invoke = mono_get_delegate_invoke (ctor_method->klass);
12334 if (!invoke || !mono_method_signature (invoke))
12337 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12339 target_ins = sp [-1];
12341 if (mono_security_core_clr_enabled ())
12342 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12344 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12345 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12346 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12348 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12352 /* FIXME: SGEN support */
12353 if (invoke_context_used == 0 || cfg->llvm_only) {
12355 if (cfg->verbose_level > 3)
12356 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12357 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12360 CHECK_CFG_EXCEPTION;
12370 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12371 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12375 inline_costs += 10 * num_calls++;
12378 case CEE_LDVIRTFTN: {
12379 MonoInst *args [2];
12383 n = read32 (ip + 2);
12384 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12387 mono_class_init (cmethod->klass);
12389 context_used = mini_method_check_context_used (cfg, cmethod);
12391 if (mono_security_core_clr_enabled ())
12392 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12395 * Optimize the common case of ldvirtftn+delegate creation
12397 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12398 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12399 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12400 MonoInst *target_ins, *handle_ins;
12401 MonoMethod *invoke;
12402 int invoke_context_used;
12403 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12405 invoke = mono_get_delegate_invoke (ctor_method->klass);
12406 if (!invoke || !mono_method_signature (invoke))
12409 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12411 target_ins = sp [-1];
12413 if (mono_security_core_clr_enabled ())
12414 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12416 /* FIXME: SGEN support */
12417 if (invoke_context_used == 0 || cfg->llvm_only) {
12419 if (cfg->verbose_level > 3)
12420 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12421 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12424 CHECK_CFG_EXCEPTION;
12437 args [1] = emit_get_rgctx_method (cfg, context_used,
12438 cmethod, MONO_RGCTX_INFO_METHOD);
12441 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12443 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12446 inline_costs += 10 * num_calls++;
12450 CHECK_STACK_OVF (1);
12452 n = read16 (ip + 2);
12454 EMIT_NEW_ARGLOAD (cfg, ins, n);
12459 CHECK_STACK_OVF (1);
12461 n = read16 (ip + 2);
12463 NEW_ARGLOADA (cfg, ins, n);
12464 MONO_ADD_INS (cfg->cbb, ins);
12472 n = read16 (ip + 2);
12474 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12476 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12480 CHECK_STACK_OVF (1);
12482 n = read16 (ip + 2);
12484 EMIT_NEW_LOCLOAD (cfg, ins, n);
12489 unsigned char *tmp_ip;
12490 CHECK_STACK_OVF (1);
12492 n = read16 (ip + 2);
12495 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12501 EMIT_NEW_LOCLOADA (cfg, ins, n);
12510 n = read16 (ip + 2);
12512 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12514 emit_stloc_ir (cfg, sp, header, n);
12518 case CEE_LOCALLOC: {
12520 MonoBasicBlock *non_zero_bb, *end_bb;
12521 int alloc_ptr = alloc_preg (cfg);
12523 if (sp != stack_start)
12525 if (cfg->method != method)
12527 * Inlining this into a loop in a parent could lead to
12528 * stack overflows which is different behavior than the
12529 * non-inlined case, thus disable inlining in this case.
12531 INLINE_FAILURE("localloc");
12533 NEW_BBLOCK (cfg, non_zero_bb);
12534 NEW_BBLOCK (cfg, end_bb);
12536 /* if size != zero */
12537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
12538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
12540 //size is zero, so result is NULL
12541 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
12542 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12544 MONO_START_BB (cfg, non_zero_bb);
12545 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12546 ins->dreg = alloc_ptr;
12547 ins->sreg1 = sp [0]->dreg;
12548 ins->type = STACK_PTR;
12549 MONO_ADD_INS (cfg->cbb, ins);
12551 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12553 ins->flags |= MONO_INST_INIT;
12555 MONO_START_BB (cfg, end_bb);
12556 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
12557 ins->type = STACK_PTR;
12563 case CEE_ENDFILTER: {
12564 MonoExceptionClause *clause, *nearest;
12569 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12571 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12572 ins->sreg1 = (*sp)->dreg;
12573 MONO_ADD_INS (cfg->cbb, ins);
12574 start_new_bblock = 1;
12578 for (cc = 0; cc < header->num_clauses; ++cc) {
12579 clause = &header->clauses [cc];
12580 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12581 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12582 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12585 g_assert (nearest);
12586 if ((ip - header->code) != nearest->handler_offset)
12591 case CEE_UNALIGNED_:
12592 ins_flag |= MONO_INST_UNALIGNED;
12593 /* FIXME: record alignment? we can assume 1 for now */
12597 case CEE_VOLATILE_:
12598 ins_flag |= MONO_INST_VOLATILE;
12602 ins_flag |= MONO_INST_TAILCALL;
12603 cfg->flags |= MONO_CFG_HAS_TAIL;
12604 /* Can't inline tail calls at this time */
12605 inline_costs += 100000;
12612 token = read32 (ip + 2);
12613 klass = mini_get_class (method, token, generic_context);
12614 CHECK_TYPELOAD (klass);
12615 if (generic_class_is_reference_type (cfg, klass))
12616 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12618 mini_emit_initobj (cfg, *sp, NULL, klass);
12622 case CEE_CONSTRAINED_:
12624 token = read32 (ip + 2);
12625 constrained_class = mini_get_class (method, token, generic_context);
12626 CHECK_TYPELOAD (constrained_class);
12632 mini_emit_memory_copy_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12640 mini_emit_memory_init_bytes (cfg, sp [0], sp [1], sp [2], ins_flag);
12648 ins_flag |= MONO_INST_NOTYPECHECK;
12650 ins_flag |= MONO_INST_NORANGECHECK;
12651 /* we ignore the no-nullcheck for now since we
12652 * really do it explicitly only when doing callvirt->call
12656 case CEE_RETHROW: {
12658 int handler_offset = -1;
12660 for (i = 0; i < header->num_clauses; ++i) {
12661 MonoExceptionClause *clause = &header->clauses [i];
12662 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12663 handler_offset = clause->handler_offset;
12668 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12670 if (handler_offset == -1)
12673 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12674 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12675 ins->sreg1 = load->dreg;
12676 MONO_ADD_INS (cfg->cbb, ins);
12678 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12679 MONO_ADD_INS (cfg->cbb, ins);
12682 link_bblock (cfg, cfg->cbb, end_bblock);
12683 start_new_bblock = 1;
12691 CHECK_STACK_OVF (1);
12693 token = read32 (ip + 2);
12694 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12695 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12698 val = mono_type_size (type, &ialign);
12700 MonoClass *klass = mini_get_class (method, token, generic_context);
12701 CHECK_TYPELOAD (klass);
12703 val = mono_type_size (&klass->byval_arg, &ialign);
12705 if (mini_is_gsharedvt_klass (klass))
12706 GSHAREDVT_FAILURE (*ip);
12708 EMIT_NEW_ICONST (cfg, ins, val);
12713 case CEE_REFANYTYPE: {
12714 MonoInst *src_var, *src;
12716 GSHAREDVT_FAILURE (*ip);
12722 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12724 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12725 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12726 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12731 case CEE_READONLY_:
12744 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12754 g_warning ("opcode 0x%02x not handled", *ip);
12758 if (start_new_bblock != 1)
12761 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12762 if (cfg->cbb->next_bb) {
12763 /* This could already be set because of inlining, #693905 */
12764 MonoBasicBlock *bb = cfg->cbb;
12766 while (bb->next_bb)
12768 bb->next_bb = end_bblock;
12770 cfg->cbb->next_bb = end_bblock;
12773 if (cfg->method == method && cfg->domainvar) {
12775 MonoInst *get_domain;
12777 cfg->cbb = init_localsbb;
12779 get_domain = mono_create_tls_get (cfg, TLS_KEY_DOMAIN);
12780 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12781 MONO_ADD_INS (cfg->cbb, store);
12784 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12785 if (cfg->compile_aot)
12786 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12787 mono_get_got_var (cfg);
12790 if (cfg->method == method && cfg->got_var)
12791 mono_emit_load_got_addr (cfg);
12793 if (init_localsbb) {
12794 cfg->cbb = init_localsbb;
12796 for (i = 0; i < header->num_locals; ++i) {
12797 emit_init_local (cfg, i, header->locals [i], init_locals);
12801 if (cfg->init_ref_vars && cfg->method == method) {
12802 /* Emit initialization for ref vars */
12803 // FIXME: Avoid duplication initialization for IL locals.
12804 for (i = 0; i < cfg->num_varinfo; ++i) {
12805 MonoInst *ins = cfg->varinfo [i];
12807 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12808 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12812 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
12813 cfg->cbb = init_localsbb;
12814 emit_push_lmf (cfg);
12817 cfg->cbb = init_localsbb;
12818 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12821 MonoBasicBlock *bb;
12824 * Make seq points at backward branch targets interruptable.
12826 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12827 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12828 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12831 /* Add a sequence point for method entry/exit events */
12832 if (seq_points && cfg->gen_sdb_seq_points) {
12833 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12834 MONO_ADD_INS (init_localsbb, ins);
12835 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12836 MONO_ADD_INS (cfg->bb_exit, ins);
12840 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12841 * the code they refer to was dead (#11880).
12843 if (sym_seq_points) {
12844 for (i = 0; i < header->code_size; ++i) {
12845 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12848 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12849 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12856 if (cfg->method == method) {
12857 MonoBasicBlock *bb;
12858 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12859 if (bb == cfg->bb_init)
12862 bb->region = mono_find_block_region (cfg, bb->real_offset);
12864 mono_create_spvar_for_region (cfg, bb->region);
12865 if (cfg->verbose_level > 2)
12866 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12869 MonoBasicBlock *bb;
12870 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
12871 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
12872 bb->real_offset = inline_offset;
12876 if (inline_costs < 0) {
12879 /* Method is too large */
12880 mname = mono_method_full_name (method, TRUE);
12881 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
12885 if ((cfg->verbose_level > 2) && (cfg->method == method))
12886 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12891 g_assert (!mono_error_ok (&cfg->error));
12895 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12899 set_exception_type_from_invalid_il (cfg, method, ip);
12903 g_slist_free (class_inits);
12904 mono_basic_block_free (original_bb);
12905 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12906 if (cfg->exception_type)
12909 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a *_MEMBASE_REG store opcode (value comes from a register) to the
 * corresponding *_MEMBASE_IMM form (value is an immediate).  Used when the
 * stored value is known to be a constant.  Asserts if OPCODE has no
 * immediate counterpart.
 */
12913 store_membase_reg_to_store_membase_imm (int opcode)
12916 case OP_STORE_MEMBASE_REG:
12917 return OP_STORE_MEMBASE_IMM;
12918 case OP_STOREI1_MEMBASE_REG:
12919 return OP_STOREI1_MEMBASE_IMM;
12920 case OP_STOREI2_MEMBASE_REG:
12921 return OP_STOREI2_MEMBASE_IMM;
12922 case OP_STOREI4_MEMBASE_REG:
12923 return OP_STOREI4_MEMBASE_IMM;
12924 case OP_STOREI8_MEMBASE_REG:
12925 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for this opcode */
12927 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a register-register opcode to the equivalent reg-immediate form
 * (e.g. an int add to OP_IADD_IMM, a long shift to OP_LSHL_IMM), including
 * membase stores and some x86/amd64-specific opcodes.  The visible returns
 * cover 32 bit ALU ops, 64 bit ALU ops (long div/rem immediate only when
 * SIZEOF_REGISTER == 8), compares, membase stores, and calls/localloc.
 */
12934 mono_op_to_op_imm (int opcode)
12938 return OP_IADD_IMM;
12940 return OP_ISUB_IMM;
12942 return OP_IDIV_IMM;
12944 return OP_IDIV_UN_IMM;
12946 return OP_IREM_IMM;
12948 return OP_IREM_UN_IMM;
12950 return OP_IMUL_IMM;
12952 return OP_IAND_IMM;
12956 return OP_IXOR_IMM;
12958 return OP_ISHL_IMM;
12960 return OP_ISHR_IMM;
12962 return OP_ISHR_UN_IMM;
12965 return OP_LADD_IMM;
12967 return OP_LSUB_IMM;
12969 return OP_LAND_IMM;
12973 return OP_LXOR_IMM;
12975 return OP_LSHL_IMM;
12977 return OP_LSHR_IMM;
12979 return OP_LSHR_UN_IMM;
/* Long rem immediate is only available on 64 bit targets */
12980 #if SIZEOF_REGISTER == 8
12982 return OP_LREM_IMM;
12986 return OP_COMPARE_IMM;
12988 return OP_ICOMPARE_IMM;
12990 return OP_LCOMPARE_IMM;
12992 case OP_STORE_MEMBASE_REG:
12993 return OP_STORE_MEMBASE_IMM;
12994 case OP_STOREI1_MEMBASE_REG:
12995 return OP_STOREI1_MEMBASE_IMM;
12996 case OP_STOREI2_MEMBASE_REG:
12997 return OP_STOREI2_MEMBASE_IMM;
12998 case OP_STOREI4_MEMBASE_REG:
12999 return OP_STOREI4_MEMBASE_IMM;
/* Architecture specific opcodes with immediate forms */
13001 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13003 return OP_X86_PUSH_IMM;
13004 case OP_X86_COMPARE_MEMBASE_REG:
13005 return OP_X86_COMPARE_MEMBASE_IMM;
13007 #if defined(TARGET_AMD64)
13008 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13009 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13011 case OP_VOIDCALL_REG:
13012 return OP_VOIDCALL;
13020 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* opcode to the IR OP_LOAD*_MEMBASE opcode with the
 * matching size/signedness.  LDIND_I and LDIND_REF both map to the
 * pointer-sized OP_LOAD_MEMBASE.  Asserts on unknown opcodes.
 */
13027 ldind_to_load_membase (int opcode)
13031 return OP_LOADI1_MEMBASE;
13033 return OP_LOADU1_MEMBASE;
13035 return OP_LOADI2_MEMBASE;
13037 return OP_LOADU2_MEMBASE;
13039 return OP_LOADI4_MEMBASE;
13041 return OP_LOADU4_MEMBASE;
13043 return OP_LOAD_MEMBASE;
13044 case CEE_LDIND_REF:
13045 return OP_LOAD_MEMBASE;
13047 return OP_LOADI8_MEMBASE;
13049 return OP_LOADR4_MEMBASE;
13051 return OP_LOADR8_MEMBASE;
13053 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* opcode to the IR OP_STORE*_MEMBASE_REG opcode with
 * the matching size.  STIND_REF maps to the pointer-sized
 * OP_STORE_MEMBASE_REG.  Asserts on unknown opcodes.
 */
13060 stind_to_store_membase (int opcode)
13064 return OP_STOREI1_MEMBASE_REG;
13066 return OP_STOREI2_MEMBASE_REG;
13068 return OP_STOREI4_MEMBASE_REG;
13070 case CEE_STIND_REF:
13071 return OP_STORE_MEMBASE_REG;
13073 return OP_STOREI8_MEMBASE_REG;
13075 return OP_STORER4_MEMBASE_REG;
13077 return OP_STORER8_MEMBASE_REG;
13079 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the absolute-address OP_LOAD*_MEM
 * form.  Only implemented for x86/amd64 (see the FIXME below); the 64 bit
 * load is gated on SIZEOF_REGISTER == 8.
 */
13086 mono_load_membase_to_load_mem (int opcode)
13088 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13089 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13091 case OP_LOAD_MEMBASE:
13092 return OP_LOAD_MEM;
13093 case OP_LOADU1_MEMBASE:
13094 return OP_LOADU1_MEM;
13095 case OP_LOADU2_MEMBASE:
13096 return OP_LOADU2_MEM;
13097 case OP_LOADI4_MEMBASE:
13098 return OP_LOADI4_MEM;
13099 case OP_LOADU4_MEMBASE:
13100 return OP_LOADU4_MEM;
13101 #if SIZEOF_REGISTER == 8
13102 case OP_LOADI8_MEMBASE:
13103 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Fuse an ALU opcode with a following membase store into a single
 * read-modify-write memory opcode (x86/amd64 only).  STORE_OPCODE is the
 * store the result feeds into; only pointer-sized/int32 stores (plus int64
 * on amd64) are eligible.  Returns the fused *_MEMBASE_{REG,IMM} opcode.
 */
13112 op_to_op_dest_membase (int store_opcode, int opcode)
13114 #if defined(TARGET_X86)
/* Only 32 bit / pointer sized destinations can be fused on x86 */
13115 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13120 return OP_X86_ADD_MEMBASE_REG;
13122 return OP_X86_SUB_MEMBASE_REG;
13124 return OP_X86_AND_MEMBASE_REG;
13126 return OP_X86_OR_MEMBASE_REG;
13128 return OP_X86_XOR_MEMBASE_REG;
13131 return OP_X86_ADD_MEMBASE_IMM;
13134 return OP_X86_SUB_MEMBASE_IMM;
13137 return OP_X86_AND_MEMBASE_IMM;
13140 return OP_X86_OR_MEMBASE_IMM;
13143 return OP_X86_XOR_MEMBASE_IMM;
13149 #if defined(TARGET_AMD64)
/* amd64 additionally allows 64 bit destinations */
13150 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13155 return OP_X86_ADD_MEMBASE_REG;
13157 return OP_X86_SUB_MEMBASE_REG;
13159 return OP_X86_AND_MEMBASE_REG;
13161 return OP_X86_OR_MEMBASE_REG;
13163 return OP_X86_XOR_MEMBASE_REG;
13165 return OP_X86_ADD_MEMBASE_IMM;
13167 return OP_X86_SUB_MEMBASE_IMM;
13169 return OP_X86_AND_MEMBASE_IMM;
13171 return OP_X86_OR_MEMBASE_IMM;
13173 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit variants */
13175 return OP_AMD64_ADD_MEMBASE_REG;
13177 return OP_AMD64_SUB_MEMBASE_REG;
13179 return OP_AMD64_AND_MEMBASE_REG;
13181 return OP_AMD64_OR_MEMBASE_REG;
13183 return OP_AMD64_XOR_MEMBASE_REG;
13186 return OP_AMD64_ADD_MEMBASE_IMM;
13189 return OP_AMD64_SUB_MEMBASE_IMM;
13192 return OP_AMD64_AND_MEMBASE_IMM;
13195 return OP_AMD64_OR_MEMBASE_IMM;
13198 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following 1 byte membase store
 * into a single x86 SETcc-to-memory opcode (x86/amd64 only).
 */
13208 op_to_op_store_membase (int store_opcode, int opcode)
13210 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13213 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13214 return OP_X86_SETEQ_MEMBASE;
13216 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13217 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a preceding membase load feeding OPCODE's first source register
 * into a memory-operand form of OPCODE (x86/amd64 only).  LOAD_OPCODE is
 * the load producing sreg1; on amd64 the choice between 32 bit and 64 bit
 * forms additionally depends on cfg->backend->ilp32.
 */
13225 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13228 /* FIXME: This has sign extension issues */
13230 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13231 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer sized / int32 loads can be fused here */
13234 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13239 return OP_X86_PUSH_MEMBASE;
13240 case OP_COMPARE_IMM:
13241 case OP_ICOMPARE_IMM:
13242 return OP_X86_COMPARE_MEMBASE_IMM;
13245 return OP_X86_COMPARE_MEMBASE_REG;
13249 #ifdef TARGET_AMD64
13250 /* FIXME: This has sign extension issues */
13252 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13253 return OP_X86_COMPARE_MEMBASE8_IMM;
13258 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13259 return OP_X86_PUSH_MEMBASE;
13261 /* FIXME: This only works for 32 bit immediates
13262 case OP_COMPARE_IMM:
13263 case OP_LCOMPARE_IMM:
13264 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13265 return OP_AMD64_COMPARE_MEMBASE_IMM;
13267 case OP_ICOMPARE_IMM:
13268 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13269 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Under ilp32 a pointer sized load is 32 bits wide */
13273 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13274 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13275 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13276 return OP_AMD64_COMPARE_MEMBASE_REG;
13279 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13280 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a preceding membase load feeding OPCODE's second source register
 * into a reg-mem form of OPCODE (x86/amd64 only).  On amd64 the 32 bit
 * forms are chosen for int32 loads (or pointer loads under ilp32) and the
 * 64 bit forms for int64/pointer loads.
 */
13289 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
13292 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13298 return OP_X86_COMPARE_REG_MEMBASE;
13300 return OP_X86_ADD_REG_MEMBASE;
13302 return OP_X86_SUB_REG_MEMBASE;
13304 return OP_X86_AND_REG_MEMBASE;
13306 return OP_X86_OR_REG_MEMBASE;
13308 return OP_X86_XOR_REG_MEMBASE;
13312 #ifdef TARGET_AMD64
/* 32 bit operand width (int32 loads, or pointer loads under ilp32) */
13313 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13316 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13318 return OP_X86_ADD_REG_MEMBASE;
13320 return OP_X86_SUB_REG_MEMBASE;
13322 return OP_X86_AND_REG_MEMBASE;
13324 return OP_X86_OR_REG_MEMBASE;
13326 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit operand width */
13328 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13332 return OP_AMD64_COMPARE_REG_MEMBASE;
13334 return OP_AMD64_ADD_REG_MEMBASE;
13336 return OP_AMD64_SUB_REG_MEMBASE;
13338 return OP_AMD64_AND_REG_MEMBASE;
13340 return OP_AMD64_OR_REG_MEMBASE;
13342 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but first filters out opcodes that are
 * emulated on this target (long shifts on 32 bit targets without native
 * long shift ops, and mul/div/rem when the arch emulates them), since the
 * emulation helpers cannot take immediate operands.
 */
13351 mono_op_to_op_imm_noemul (int opcode)
13354 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13360 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13367 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13372 return mono_op_to_op_imm (opcode);
13377 * mono_handle_global_vregs:
13379 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13383 mono_handle_global_vregs (MonoCompile *cfg)
13385 gint32 *vreg_to_bb;
13386 MonoBasicBlock *bb;
/*
 * vreg_to_bb maps vreg -> (block_num + 1) of the single bblock using it,
 * or -1 once it has been seen in more than one bblock.
 * NOTE(review): sizeof (gint32*) looks like it should be sizeof (gint32);
 * as written this over-allocates (harmlessly) on 64 bit hosts -- confirm.
 */
13389 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13391 #ifdef MONO_ARCH_SIMD_INTRINSICS
13392 if (cfg->uses_simd_intrinsics)
13393 mono_simd_simplify_indirection (cfg);
13396 /* Find local vregs used in more than one bb */
13397 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13398 MonoInst *ins = bb->code;
13399 int block_num = bb->block_num;
13401 if (cfg->verbose_level > 2)
13402 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13405 for (; ins; ins = ins->next) {
13406 const char *spec = INS_INFO (ins->opcode);
13407 int regtype = 0, regindex;
13410 if (G_UNLIKELY (cfg->verbose_level > 2))
13411 mono_print_ins (ins);
13413 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of this instruction */
13415 for (regindex = 0; regindex < 4; regindex ++) {
13418 if (regindex == 0) {
13419 regtype = spec [MONO_INST_DEST];
13420 if (regtype == ' ')
13423 } else if (regindex == 1) {
13424 regtype = spec [MONO_INST_SRC1];
13425 if (regtype == ' ')
13428 } else if (regindex == 2) {
13429 regtype = spec [MONO_INST_SRC2];
13430 if (regtype == ' ')
13433 } else if (regindex == 3) {
13434 regtype = spec [MONO_INST_SRC3];
13435 if (regtype == ' ')
13440 #if SIZEOF_REGISTER == 4
13441 /* In the LLVM case, the long opcodes are not decomposed */
13442 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13444 * Since some instructions reference the original long vreg,
13445 * and some reference the two component vregs, it is quite hard
13446 * to determine when it needs to be global. So be conservative.
13448 if (!get_vreg_to_inst (cfg, vreg)) {
13449 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13451 if (cfg->verbose_level > 2)
13452 printf ("LONG VREG R%d made global.\n", vreg);
13456 * Make the component vregs volatile since the optimizations can
13457 * get confused otherwise.
13459 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
13460 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
13464 g_assert (vreg != -1);
13466 prev_bb = vreg_to_bb [vreg];
13467 if (prev_bb == 0) {
13468 /* 0 is a valid block num */
13469 vreg_to_bb [vreg] = block_num + 1;
13470 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are never made global */
13471 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13474 if (!get_vreg_to_inst (cfg, vreg)) {
13475 if (G_UNLIKELY (cfg->verbose_level > 2))
13476 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the type matching regtype */
13480 if (vreg_is_ref (cfg, vreg))
13481 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13483 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13486 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13489 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13493 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13496 g_assert_not_reached ();
13500 /* Flag as having been used in more than one bb */
13501 vreg_to_bb [vreg] = -1;
13507 /* If a variable is used in only one bblock, convert it into a local vreg */
13508 for (i = 0; i < cfg->num_varinfo; i++) {
13509 MonoInst *var = cfg->varinfo [i];
13510 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13512 switch (var->type) {
13518 #if SIZEOF_REGISTER == 8
13521 #if !defined(TARGET_X86)
13522 /* Enabling this screws up the fp stack on x86 */
13525 if (mono_arch_is_soft_float ())
13529 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
13533 /* Arguments are implicitly global */
13534 /* Putting R4 vars into registers doesn't work currently */
13535 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13536 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13538 * Make that the variable's liveness interval doesn't contain a call, since
13539 * that would cause the lvreg to be spilled, making the whole optimization
13542 /* This is too slow for JIT compilation */
13544 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13546 int def_index, call_index, ins_index;
13547 gboolean spilled = FALSE;
13552 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13553 const char *spec = INS_INFO (ins->opcode);
13555 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13556 def_index = ins_index;
/*
 * NOTE(review): both disjuncts below test SRC1/sreg1; the second
 * presumably should test SRC2/sreg2, so uses through sreg2 are
 * currently not detected here -- confirm upstream.
 */
13558 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13559 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13560 if (call_index > def_index) {
13566 if (MONO_IS_CALL (ins))
13567 call_index = ins_index;
13577 if (G_UNLIKELY (cfg->verbose_level > 2))
13578 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13579 var->flags |= MONO_INST_IS_DEAD;
13580 cfg->vreg_to_inst [var->dreg] = NULL;
13587 * Compress the varinfo and vars tables so the liveness computation is faster and
13588 * takes up less space.
13591 for (i = 0; i < cfg->num_varinfo; ++i) {
13592 MonoInst *var = cfg->varinfo [i];
13593 if (pos < i && cfg->locals_start == i)
13594 cfg->locals_start = pos;
13595 if (!(var->flags & MONO_INST_IS_DEAD)) {
13597 cfg->varinfo [pos] = cfg->varinfo [i];
13598 cfg->varinfo [pos]->inst_c0 = pos;
13599 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13600 cfg->vars [pos].idx = pos;
13601 #if SIZEOF_REGISTER == 4
13602 if (cfg->varinfo [pos]->type == STACK_I8) {
13603 /* Modify the two component vars too */
13606 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
13607 var1->inst_c0 = pos;
13608 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
13609 var1->inst_c0 = pos;
13616 cfg->num_varinfo = pos;
13617 if (cfg->locals_start > cfg->num_varinfo)
13618 cfg->locals_start = cfg->num_varinfo;
13622 * mono_allocate_gsharedvt_vars:
13624 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
13625 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
13628 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
13632 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13634 for (i = 0; i < cfg->num_varinfo; ++i) {
13635 MonoInst *ins = cfg->varinfo [i];
13638 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals get an entries [] slot; the mapping is biased by +1 so 0 can mean 'no entry' */
13639 if (i >= cfg->locals_start) {
13641 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13642 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13643 ins->opcode = OP_GSHAREDVT_LOCAL;
13644 ins->inst_imm = idx;
/* Arguments are marked with -1: passed by ref, resolved later via OP_GSHAREDVT_ARG_REGOFFSET */
13647 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
13648 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13655 * mono_spill_global_vars:
13657 * Generate spill code for variables which are not allocated to registers,
13658 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13659 * code is generated which could be optimized by the local optimization passes.
13662 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13664 MonoBasicBlock *bb;
13666 int orig_next_vreg;
13667 guint32 *vreg_to_lvreg;
13669 guint32 i, lvregs_len, lvregs_size;
13670 gboolean dest_has_lvreg = FALSE;
13671 MonoStackType stacktypes [128];
13672 MonoInst **live_range_start, **live_range_end;
13673 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13675 *need_local_opts = FALSE;
13677 memset (spec2, 0, sizeof (spec2));
13679 /* FIXME: Move this function to mini.c */
13680 stacktypes ['i'] = STACK_PTR;
13681 stacktypes ['l'] = STACK_I8;
13682 stacktypes ['f'] = STACK_R8;
13683 #ifdef MONO_ARCH_SIMD_INTRINSICS
13684 stacktypes ['x'] = STACK_VTYPE;
13687 #if SIZEOF_REGISTER == 4
13688 /* Create MonoInsts for longs */
13689 for (i = 0; i < cfg->num_varinfo; i++) {
13690 MonoInst *ins = cfg->varinfo [i];
13692 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13693 switch (ins->type) {
13698 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13701 g_assert (ins->opcode == OP_REGOFFSET);
13703 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
13705 tree->opcode = OP_REGOFFSET;
13706 tree->inst_basereg = ins->inst_basereg;
13707 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13709 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
13711 tree->opcode = OP_REGOFFSET;
13712 tree->inst_basereg = ins->inst_basereg;
13713 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13723 if (cfg->compute_gc_maps) {
13724 /* registers need liveness info even for !non refs */
13725 for (i = 0; i < cfg->num_varinfo; i++) {
13726 MonoInst *ins = cfg->varinfo [i];
13728 if (ins->opcode == OP_REGVAR)
13729 ins->flags |= MONO_INST_GC_TRACK;
13733 /* FIXME: widening and truncation */
13736 * As an optimization, when a variable allocated to the stack is first loaded into
13737 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13738 * the variable again.
13740 orig_next_vreg = cfg->next_vreg;
13741 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13742 lvregs_size = 1024;
13743 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * lvregs_size);
13747 * These arrays contain the first and last instructions accessing a given
13749 * Since we emit bblocks in the same order we process them here, and we
13750 * don't split live ranges, these will precisely describe the live range of
13751 * the variable, i.e. the instruction range where a valid value can be found
13752 * in the variables location.
13753 * The live range is computed using the liveness info computed by the liveness pass.
13754 * We can't use vmv->range, since that is an abstract live range, and we need
13755 * one which is instruction precise.
13756 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13758 /* FIXME: Only do this if debugging info is requested */
13759 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13760 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13761 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13762 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13764 /* Add spill loads/stores */
13765 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13768 if (cfg->verbose_level > 2)
13769 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13771 /* Clear vreg_to_lvreg array */
13772 for (i = 0; i < lvregs_len; i++)
13773 vreg_to_lvreg [lvregs [i]] = 0;
13777 MONO_BB_FOR_EACH_INS (bb, ins) {
13778 const char *spec = INS_INFO (ins->opcode);
13779 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13780 gboolean store, no_lvreg;
13781 int sregs [MONO_MAX_SRC_REGS];
13783 if (G_UNLIKELY (cfg->verbose_level > 2))
13784 mono_print_ins (ins);
13786 if (ins->opcode == OP_NOP)
13790 * We handle LDADDR here as well, since it can only be decomposed
13791 * when variable addresses are known.
13793 if (ins->opcode == OP_LDADDR) {
13794 MonoInst *var = (MonoInst *)ins->inst_p0;
13796 if (var->opcode == OP_VTARG_ADDR) {
13797 /* Happens on SPARC/S390 where vtypes are passed by reference */
13798 MonoInst *vtaddr = var->inst_left;
13799 if (vtaddr->opcode == OP_REGVAR) {
13800 ins->opcode = OP_MOVE;
13801 ins->sreg1 = vtaddr->dreg;
13803 else if (var->inst_left->opcode == OP_REGOFFSET) {
13804 ins->opcode = OP_LOAD_MEMBASE;
13805 ins->inst_basereg = vtaddr->inst_basereg;
13806 ins->inst_offset = vtaddr->inst_offset;
13809 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
13810 /* gsharedvt arg passed by ref */
13811 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13813 ins->opcode = OP_LOAD_MEMBASE;
13814 ins->inst_basereg = var->inst_basereg;
13815 ins->inst_offset = var->inst_offset;
13816 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
13817 MonoInst *load, *load2, *load3;
13818 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
13819 int reg1, reg2, reg3;
13820 MonoInst *info_var = cfg->gsharedvt_info_var;
13821 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13825 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13828 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13830 g_assert (info_var);
13831 g_assert (locals_var);
13833 /* Mark the instruction used to compute the locals var as used */
13834 cfg->gsharedvt_locals_var_ins = NULL;
13836 /* Load the offset */
13837 if (info_var->opcode == OP_REGOFFSET) {
13838 reg1 = alloc_ireg (cfg);
13839 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13840 } else if (info_var->opcode == OP_REGVAR) {
13842 reg1 = info_var->dreg;
13844 g_assert_not_reached ();
13846 reg2 = alloc_ireg (cfg);
13847 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13848 /* Load the locals area address */
13849 reg3 = alloc_ireg (cfg);
13850 if (locals_var->opcode == OP_REGOFFSET) {
13851 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13852 } else if (locals_var->opcode == OP_REGVAR) {
13853 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13855 g_assert_not_reached ();
13857 /* Compute the address */
13858 ins->opcode = OP_PADD;
13862 mono_bblock_insert_before_ins (bb, ins, load3);
13863 mono_bblock_insert_before_ins (bb, load3, load2);
13865 mono_bblock_insert_before_ins (bb, load2, load);
13867 g_assert (var->opcode == OP_REGOFFSET);
13869 ins->opcode = OP_ADD_IMM;
13870 ins->sreg1 = var->inst_basereg;
13871 ins->inst_imm = var->inst_offset;
13874 *need_local_opts = TRUE;
13875 spec = INS_INFO (ins->opcode);
13878 if (ins->opcode < MONO_CEE_LAST) {
13879 mono_print_ins (ins);
13880 g_assert_not_reached ();
13884 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13888 if (MONO_IS_STORE_MEMBASE (ins)) {
13889 tmp_reg = ins->dreg;
13890 ins->dreg = ins->sreg2;
13891 ins->sreg2 = tmp_reg;
13894 spec2 [MONO_INST_DEST] = ' ';
13895 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13896 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13897 spec2 [MONO_INST_SRC3] = ' ';
13899 } else if (MONO_IS_STORE_MEMINDEX (ins))
13900 g_assert_not_reached ();
13905 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13906 printf ("\t %.3s %d", spec, ins->dreg);
13907 num_sregs = mono_inst_get_src_registers (ins, sregs);
13908 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13909 printf (" %d", sregs [srcindex]);
13916 regtype = spec [MONO_INST_DEST];
13917 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13920 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13921 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13922 MonoInst *store_ins;
13924 MonoInst *def_ins = ins;
13925 int dreg = ins->dreg; /* The original vreg */
13927 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13929 if (var->opcode == OP_REGVAR) {
13930 ins->dreg = var->dreg;
13931 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13933 * Instead of emitting a load+store, use a _membase opcode.
13935 g_assert (var->opcode == OP_REGOFFSET);
13936 if (ins->opcode == OP_MOVE) {
13940 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13941 ins->inst_basereg = var->inst_basereg;
13942 ins->inst_offset = var->inst_offset;
13945 spec = INS_INFO (ins->opcode);
13949 g_assert (var->opcode == OP_REGOFFSET);
13951 prev_dreg = ins->dreg;
13953 /* Invalidate any previous lvreg for this vreg */
13954 vreg_to_lvreg [ins->dreg] = 0;
13958 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13960 store_opcode = OP_STOREI8_MEMBASE_REG;
13963 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13965 #if SIZEOF_REGISTER != 8
13966 if (regtype == 'l') {
13967 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
13968 mono_bblock_insert_after_ins (bb, ins, store_ins);
13969 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
13970 mono_bblock_insert_after_ins (bb, ins, store_ins);
13971 def_ins = store_ins;
13976 g_assert (store_opcode != OP_STOREV_MEMBASE);
13978 /* Try to fuse the store into the instruction itself */
13979 /* FIXME: Add more instructions */
13980 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13981 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13982 ins->inst_imm = ins->inst_c0;
13983 ins->inst_destbasereg = var->inst_basereg;
13984 ins->inst_offset = var->inst_offset;
13985 spec = INS_INFO (ins->opcode);
13986 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
13987 ins->opcode = store_opcode;
13988 ins->inst_destbasereg = var->inst_basereg;
13989 ins->inst_offset = var->inst_offset;
13993 tmp_reg = ins->dreg;
13994 ins->dreg = ins->sreg2;
13995 ins->sreg2 = tmp_reg;
13998 spec2 [MONO_INST_DEST] = ' ';
13999 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14000 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14001 spec2 [MONO_INST_SRC3] = ' ';
14003 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14004 // FIXME: The backends expect the base reg to be in inst_basereg
14005 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14007 ins->inst_basereg = var->inst_basereg;
14008 ins->inst_offset = var->inst_offset;
14009 spec = INS_INFO (ins->opcode);
14011 /* printf ("INS: "); mono_print_ins (ins); */
14012 /* Create a store instruction */
14013 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14015 /* Insert it after the instruction */
14016 mono_bblock_insert_after_ins (bb, ins, store_ins);
14018 def_ins = store_ins;
14021 * We can't assign ins->dreg to var->dreg here, since the
14022 * sregs could use it. So set a flag, and do it after
14025 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14026 dest_has_lvreg = TRUE;
14031 if (def_ins && !live_range_start [dreg]) {
14032 live_range_start [dreg] = def_ins;
14033 live_range_start_bb [dreg] = bb;
14036 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14039 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14040 tmp->inst_c1 = dreg;
14041 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14048 num_sregs = mono_inst_get_src_registers (ins, sregs);
14049 for (srcindex = 0; srcindex < 3; ++srcindex) {
14050 regtype = spec [MONO_INST_SRC1 + srcindex];
14051 sreg = sregs [srcindex];
14053 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14054 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14055 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14056 MonoInst *use_ins = ins;
14057 MonoInst *load_ins;
14058 guint32 load_opcode;
14060 if (var->opcode == OP_REGVAR) {
14061 sregs [srcindex] = var->dreg;
14062 //mono_inst_set_src_registers (ins, sregs);
14063 live_range_end [sreg] = use_ins;
14064 live_range_end_bb [sreg] = bb;
14066 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14069 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14070 /* var->dreg is a hreg */
14071 tmp->inst_c1 = sreg;
14072 mono_bblock_insert_after_ins (bb, ins, tmp);
14078 g_assert (var->opcode == OP_REGOFFSET);
14080 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14082 g_assert (load_opcode != OP_LOADV_MEMBASE);
14084 if (vreg_to_lvreg [sreg]) {
14085 g_assert (vreg_to_lvreg [sreg] != -1);
14087 /* The variable is already loaded to an lvreg */
14088 if (G_UNLIKELY (cfg->verbose_level > 2))
14089 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14090 sregs [srcindex] = vreg_to_lvreg [sreg];
14091 //mono_inst_set_src_registers (ins, sregs);
14095 /* Try to fuse the load into the instruction */
14096 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14097 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14098 sregs [0] = var->inst_basereg;
14099 //mono_inst_set_src_registers (ins, sregs);
14100 ins->inst_offset = var->inst_offset;
14101 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14102 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14103 sregs [1] = var->inst_basereg;
14104 //mono_inst_set_src_registers (ins, sregs);
14105 ins->inst_offset = var->inst_offset;
14107 if (MONO_IS_REAL_MOVE (ins)) {
14108 ins->opcode = OP_NOP;
14111 //printf ("%d ", srcindex); mono_print_ins (ins);
14113 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14115 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14116 if (var->dreg == prev_dreg) {
14118 * sreg refers to the value loaded by the load
14119 * emitted below, but we need to use ins->dreg
14120 * since it refers to the store emitted earlier.
14124 g_assert (sreg != -1);
14125 vreg_to_lvreg [var->dreg] = sreg;
14126 if (lvregs_len >= lvregs_size) {
14127 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14128 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14129 lvregs = new_lvregs;
14132 lvregs [lvregs_len ++] = var->dreg;
14136 sregs [srcindex] = sreg;
14137 //mono_inst_set_src_registers (ins, sregs);
14139 #if SIZEOF_REGISTER != 8
14140 if (regtype == 'l') {
14141 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14142 mono_bblock_insert_before_ins (bb, ins, load_ins);
14143 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14144 mono_bblock_insert_before_ins (bb, ins, load_ins);
14145 use_ins = load_ins;
14150 #if SIZEOF_REGISTER == 4
14151 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14153 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14154 mono_bblock_insert_before_ins (bb, ins, load_ins);
14155 use_ins = load_ins;
14159 if (var->dreg < orig_next_vreg) {
14160 live_range_end [var->dreg] = use_ins;
14161 live_range_end_bb [var->dreg] = bb;
14164 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14167 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14168 tmp->inst_c1 = var->dreg;
14169 mono_bblock_insert_after_ins (bb, ins, tmp);
14173 mono_inst_set_src_registers (ins, sregs);
14175 if (dest_has_lvreg) {
14176 g_assert (ins->dreg != -1);
14177 vreg_to_lvreg [prev_dreg] = ins->dreg;
14178 if (lvregs_len >= lvregs_size) {
14179 guint32 *new_lvregs = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * lvregs_size * 2);
14180 memcpy (new_lvregs, lvregs, sizeof (guint32) * lvregs_size);
14181 lvregs = new_lvregs;
14184 lvregs [lvregs_len ++] = prev_dreg;
14185 dest_has_lvreg = FALSE;
14189 tmp_reg = ins->dreg;
14190 ins->dreg = ins->sreg2;
14191 ins->sreg2 = tmp_reg;
14194 if (MONO_IS_CALL (ins)) {
14195 /* Clear vreg_to_lvreg array */
14196 for (i = 0; i < lvregs_len; i++)
14197 vreg_to_lvreg [lvregs [i]] = 0;
14199 } else if (ins->opcode == OP_NOP) {
14201 MONO_INST_NULLIFY_SREGS (ins);
14204 if (cfg->verbose_level > 2)
14205 mono_print_ins_index (1, ins);
14208 /* Extend the live range based on the liveness info */
14209 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14210 for (i = 0; i < cfg->num_varinfo; i ++) {
14211 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14213 if (vreg_is_volatile (cfg, vi->vreg))
14214 /* The liveness info is incomplete */
14217 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14218 /* Live from at least the first ins of this bb */
14219 live_range_start [vi->vreg] = bb->code;
14220 live_range_start_bb [vi->vreg] = bb;
14223 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14224 /* Live at least until the last ins of this bb */
14225 live_range_end [vi->vreg] = bb->last_ins;
14226 live_range_end_bb [vi->vreg] = bb;
14233 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14234 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14236 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14237 for (i = 0; i < cfg->num_varinfo; ++i) {
14238 int vreg = MONO_VARINFO (cfg, i)->vreg;
14241 if (live_range_start [vreg]) {
14242 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14244 ins->inst_c1 = vreg;
14245 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14247 if (live_range_end [vreg]) {
14248 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14250 ins->inst_c1 = vreg;
14251 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14252 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14254 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14259 if (cfg->gsharedvt_locals_var_ins) {
14260 /* Nullify if unused */
14261 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14262 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14265 g_free (live_range_start);
14266 g_free (live_range_end);
14267 g_free (live_range_start_bb);
14268 g_free (live_range_end_bb);
14274 * - use 'iadd' instead of 'int_add'
14275 * - handling ovf opcodes: decompose in method_to_ir.
14276 * - unify iregs/fregs
14277 * -> partly done, the missing parts are:
14278 * - a more complete unification would involve unifying the hregs as well, so
14279 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14280 * would no longer map to the machine hregs, so the code generators would need to
14281 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14282 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14283 * fp/non-fp branches speeds it up by about 15%.
14284 * - use sext/zext opcodes instead of shifts
14286 * - get rid of TEMPLOADs if possible and use vregs instead
14287 * - clean up usage of OP_P/OP_ opcodes
14288 * - cleanup usage of DUMMY_USE
14289 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14291 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14292 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14293 * - make sure handle_stack_args () is called before the branch is emitted
14294 * - when the new IR is done, get rid of all unused stuff
14295 * - COMPARE/BEQ as separate instructions or unify them ?
14296 * - keeping them separate allows specialized compare instructions like
14297 * compare_imm, compare_membase
14298 * - most back ends unify fp compare+branch, fp compare+ceq
14299 * - integrate mono_save_args into inline_method
14300 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14301 * - handle long shift opts on 32 bit platforms somehow: they require
14302 * 3 sregs (2 for arg1 and 1 for arg2)
14303 * - make byref a 'normal' type.
14304 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14305 * variable if needed.
14306 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14307 * like inline_method.
14308 * - remove inlining restrictions
14309 * - fix LNEG and enable cfold of INEG
14310 * - generalize x86 optimizations like ldelema as a peephole optimization
14311 * - add store_mem_imm for amd64
14312 * - optimize the loading of the interruption flag in the managed->native wrappers
14313 * - avoid special handling of OP_NOP in passes
14314 * - move code inserting instructions into one function/macro.
14315 * - try a coalescing phase after liveness analysis
14316 * - add float -> vreg conversion + local optimizations on !x86
14317 * - figure out how to handle decomposed branches during optimizations, ie.
14318 * compare+branch, op_jump_table+op_br etc.
14319 * - promote RuntimeXHandles to vregs
14320 * - vtype cleanups:
14321 * - add a NEW_VARLOADA_VREG macro
14322 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14323 * accessing vtype fields.
14324 * - get rid of I8CONST on 64 bit platforms
14325 * - dealing with the increase in code size due to branches created during opcode
14327 * - use extended basic blocks
14328 * - all parts of the JIT
14329 * - handle_global_vregs () && local regalloc
14330 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14331 * - sources of increase in code size:
14334 * - isinst and castclass
14335 * - lvregs not allocated to global registers even if used multiple times
14336 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14338 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14339 * - add all micro optimizations from the old JIT
14340 * - put tree optimizations into the deadce pass
14341 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14342 * specific function.
14343 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14344 * fcompare + branchCC.
14345 * - create a helper function for allocating a stack slot, taking into account
14346 * MONO_CFG_HAS_SPILLUP.
14348 * - merge the ia64 switch changes.
14349 * - optimize mono_regstate2_alloc_int/float.
14350 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14351 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14352 * parts of the tree could be separated by other instructions, killing the tree
14353 * arguments, or stores killing loads etc. Also, should we fold loads into other
14354 * instructions if the result of the load is used multiple times ?
14355 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14356 * - LAST MERGE: 108395.
14357 * - when returning vtypes in registers, generate IR and append it to the end of the
14358 * last bb instead of doing it in the epilog.
14359 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14367 - When to decompose opcodes:
14368 - earlier: this makes some optimizations hard to implement, since the low level IR
14369 no longer contains the necessary information. But it is easier to do.
14370 - later: harder to implement, enables more optimizations.
14371 - Branches inside bblocks:
14372 - created when decomposing complex opcodes.
14373 - branches to another bblock: harmless, but not tracked by the branch
14374 optimizations, so need to branch to a label at the start of the bblock.
14375 - branches to inside the same bblock: very problematic, trips up the local
14376 reg allocator. Can be fixed by splitting the current bblock, but that is a
14377 complex operation, since some local vregs can become global vregs etc.
14378 - Local/global vregs:
14379 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14380 local register allocator.
14381 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14382 structure, created by mono_create_var (). Assigned to hregs or the stack by
14383 the global register allocator.
14384 - When to do optimizations like alu->alu_imm:
14385 - earlier -> saves work later on since the IR will be smaller/simpler
14386 - later -> can work on more instructions
14387 - Handling of valuetypes:
14388 - When a vtype is pushed on the stack, a new temporary is created, an
14389 instruction computing its address (LDADDR) is emitted and pushed on
14390 the stack. Need to optimize cases when the vtype is used immediately as in
14391 argument passing, stloc etc.
14392 - Instead of the to_end stuff in the old JIT, simply call the function handling
14393 the values on the stack before emitting the last instruction of the bb.
14396 #else /* !DISABLE_JIT */
14399 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
14403 #endif /* !DISABLE_JIT */