2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
28 #ifdef HAVE_SYS_TIME_H
36 #include <mono/utils/memcheck.h>
38 #include <mono/metadata/abi-details.h>
39 #include <mono/metadata/assembly.h>
40 #include <mono/metadata/attrdefs.h>
41 #include <mono/metadata/loader.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/class.h>
44 #include <mono/metadata/object.h>
45 #include <mono/metadata/exception.h>
46 #include <mono/metadata/opcodes.h>
47 #include <mono/metadata/mono-endian.h>
48 #include <mono/metadata/tokentype.h>
49 #include <mono/metadata/tabledefs.h>
50 #include <mono/metadata/marshal.h>
51 #include <mono/metadata/debug-helpers.h>
52 #include <mono/metadata/mono-debug.h>
53 #include <mono/metadata/mono-debug-debugger.h>
54 #include <mono/metadata/gc-internals.h>
55 #include <mono/metadata/security-manager.h>
56 #include <mono/metadata/threads-types.h>
57 #include <mono/metadata/security-core-clr.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/monitor.h>
61 #include <mono/metadata/debug-mono-symfile.h>
62 #include <mono/utils/mono-compiler.h>
63 #include <mono/utils/mono-memory-model.h>
64 #include <mono/utils/mono-error-internals.h>
65 #include <mono/metadata/mono-basic-block.h>
66 #include <mono/metadata/reflection-internals.h>
67 #include <mono/utils/mono-threads-coop.h>
73 #include "jit-icalls.h"
75 #include "debugger-agent.h"
76 #include "seq-points.h"
77 #include "aot-compiler.h"
78 #include "mini-llvm.h"
/* Tunables for the inliner's cost model: BRANCH_COST weights branches,
 * INLINE_LENGTH_LIMIT caps the size of methods considered for inlining. */
80 #define BRANCH_COST 10
81 #define INLINE_LENGTH_LIMIT 20
83 /* These have 'cfg' as an implicit argument */
/* Record an inline failure (unless we are compiling a wrapper) and abort
 * compilation of the current method via the exception_exit label. */
84 #define INLINE_FAILURE(msg) do { \
85 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
86 inline_failure (cfg, msg); \
87 goto exception_exit; \
/* Bail out if a previous step already flagged an exception on the cfg. */
90 #define CHECK_CFG_EXCEPTION do {\
91 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
92 goto exception_exit; \
/* Flag a FieldAccessException for FIELD accessed from METHOD and bail out. */
94 #define FIELD_ACCESS_FAILURE(method, field) do { \
95 field_access_failure ((cfg), (method), (field)); \
96 goto exception_exit; \
/* Generic sharing is impossible for OPCODE: record the failure site and bail. */
98 #define GENERIC_SHARING_FAILURE(opcode) do { \
100 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
101 goto exception_exit; \
/* Same as above, but only when compiling in gsharedvt mode. */
104 #define GSHAREDVT_FAILURE(opcode) do { \
105 if (cfg->gsharedvt) { \
106 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
107 goto exception_exit; \
/* Record an OOM condition in cfg->error and abort compilation. */
110 #define OUT_OF_MEMORY_FAILURE do { \
111 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
112 mono_error_set_out_of_memory (&cfg->error, ""); \
113 goto exception_exit; \
/* Mark the method as not AOT-compilable, logging the call site when verbose. */
115 #define DISABLE_AOT(cfg) do { \
116 if ((cfg)->verbose_level >= 2) \
117 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
118 (cfg)->disable_aot = TRUE; \
/* A metadata load failed: optionally break into the debugger, then bail. */
120 #define LOAD_ERROR do { \
121 break_on_unverified (); \
122 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
/* Like LOAD_ERROR, but remembers the offending class in cfg->exception_ptr. */
123 goto exception_exit; \
126 #define TYPE_LOAD_ERROR(klass) do { \
127 cfg->exception_ptr = klass; \
/* Convert a pending MonoError on the cfg into a compile-time exception. */
131 #define CHECK_CFG_ERROR do {\
132 if (!mono_error_ok (&cfg->error)) { \
133 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
134 goto mono_error_exit; \
138 /* Determine whether 'ins' represents a load of the 'this' argument */
139 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this file. */
141 static int ldind_to_load_membase (int opcode);
142 static int stind_to_store_membase (int opcode);
/* Opcode -> immediate-variant mappings; exported (non-static) for other mini files. */
144 int mono_op_to_op_imm (int opcode);
145 int mono_op_to_op_imm_noemul (int opcode);
147 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
149 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
150 guchar *ip, guint real_offset, gboolean inline_always);
152 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp);
154 /* helper methods signatures */
/* Lazily created icall signatures, filled in by mono_create_helper_signatures (). */
155 static MonoMethodSignature *helper_sig_domain_get;
156 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
157 static MonoMethodSignature *helper_sig_llvmonly_imt_trampoline;
159 /* type loading helpers */
/* Cached class lookups; the macros expand into getter functions with a cache. */
160 static GENERATE_GET_CLASS_WITH_CACHE (runtime_helpers, System.Runtime.CompilerServices, RuntimeHelpers)
161 static GENERATE_TRY_GET_CLASS_WITH_CACHE (debuggable_attribute, System.Diagnostics, DebuggableAttribute)
164 * Instruction metadata
/* The MINI_OP/MINI_OP3 X-macros below are (re)defined and mini-ops.h is
 * included repeatedly to build per-opcode metadata tables. */
172 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
173 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
179 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
184 /* keep in sync with the enum in mini.h */
187 #include "mini-ops.h"
/* Second expansion: compute the source-register count per opcode. */
192 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
193 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
195 * This should contain the index of the last sreg + 1. This is not the same
196 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
198 const gint8 ins_sreg_counts[] = {
199 #include "mini-ops.h"
/* Initialize liveness bookkeeping for a new variable. */
204 #define MONO_INIT_VARINFO(vi,id) do { \
205 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers around the static alloc_* virtual-register allocators,
 * one per register class (int, long, float, pointer, or by stack type). */
211 mono_alloc_ireg (MonoCompile *cfg)
213 return alloc_ireg (cfg);
217 mono_alloc_lreg (MonoCompile *cfg)
219 return alloc_lreg (cfg);
223 mono_alloc_freg (MonoCompile *cfg)
225 return alloc_freg (cfg);
229 mono_alloc_preg (MonoCompile *cfg)
231 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
235 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
237 return alloc_dreg (cfg, stack_type);
241 * mono_alloc_ireg_ref:
243 * Allocate an IREG, and mark it as holding a GC ref.
246 mono_alloc_ireg_ref (MonoCompile *cfg)
248 return alloc_ireg_ref (cfg);
252 * mono_alloc_ireg_mp:
254 * Allocate an IREG, and mark it as holding a managed pointer.
257 mono_alloc_ireg_mp (MonoCompile *cfg)
259 return alloc_ireg_mp (cfg);
263 * mono_alloc_ireg_copy:
265 * Allocate an IREG with the same GC type as VREG.
268 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
270 if (vreg_is_ref (cfg, vreg))
271 return alloc_ireg_ref (cfg);
272 else if (vreg_is_mp (cfg, vreg))
273 return alloc_ireg_mp (cfg);
/* Neither a GC ref nor a managed pointer: plain integer register. */
275 return alloc_ireg (cfg);
/* Pick the move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/...) used to copy a value
 * of TYPE between virtual registers. Recurses for enums, generic instances
 * and (under gshared) type variables. */
279 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
284 type = mini_get_underlying_type (type);
286 switch (type->type) {
299 case MONO_TYPE_FNPTR:
301 case MONO_TYPE_CLASS:
302 case MONO_TYPE_STRING:
303 case MONO_TYPE_OBJECT:
304 case MONO_TYPE_SZARRAY:
305 case MONO_TYPE_ARRAY:
309 #if SIZEOF_REGISTER == 8
/* R4 values live in a dedicated register class only when r4fp is enabled. */
315 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
318 case MONO_TYPE_VALUETYPE:
319 if (type->data.klass->enumtype) {
/* Enums move like their underlying primitive type. */
320 type = mono_class_enum_basetype (type->data.klass);
323 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
326 case MONO_TYPE_TYPEDBYREF:
328 case MONO_TYPE_GENERICINST:
/* Recurse on the generic type definition. */
329 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only reach here under generic sharing. */
333 g_assert (cfg->gshared);
334 if (mini_type_var_is_vt (type))
337 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
339 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block's in/out edges and its instruction list
 * to stdout, prefixed with MSG. */
345 mono_print_bb (MonoBasicBlock *bb, const char *msg)
350 printf ("\n%s %d: [IN: ", msg, bb->block_num);
351 for (i = 0; i < bb->in_count; ++i)
352 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
354 for (i = 0; i < bb->out_count; ++i)
355 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Print every instruction in the block, in order. */
357 for (tree = bb->code; tree; tree = tree->next)
358 mono_print_ins_index (-1, tree);
/* Build the icall signatures used by the JIT helpers declared above. */
362 mono_create_helper_signatures (void)
364 helper_sig_domain_get = mono_create_icall_signature ("ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_llvmonly_imt_trampoline = mono_create_icall_signature ("ptr ptr ptr");
/* Debugging hook: trap when unverified IL is hit, if the option is set. */
369 static MONO_NEVER_INLINE void
370 break_on_unverified (void)
372 if (mini_get_debug_options ()->break_on_unverified)
/* Record a FieldAccessException for FIELD accessed from METHOD on the cfg. */
376 static MONO_NEVER_INLINE void
377 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
379 char *method_fname = mono_method_full_name (method, TRUE);
380 char *field_fname = mono_field_full_name (field);
381 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
382 mono_error_set_generic_error (&cfg->error, "System", "FieldAccessException", "Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
383 g_free (method_fname);
384 g_free (field_fname);
/* Mark the cfg as failed due to an aborted inline attempt; MSG explains why
 * (printed only at verbosity >= 2). */
387 static MONO_NEVER_INLINE void
388 inline_failure (MonoCompile *cfg, const char *msg)
390 if (cfg->verbose_level >= 2)
391 printf ("inline failed: %s\n", msg);
392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
395 static MONO_NEVER_INLINE void
396 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
398 if (cfg->verbose_level > 2) \
399 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/* Record that gsharedvt compilation failed for OPCODE at FILE:LINE; stores a
 * descriptive message on the cfg and flags a generic-sharing failure so the
 * method is recompiled without gsharedvt. */
403 static MONO_NEVER_INLINE void
404 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
406 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
407 if (cfg->verbose_level >= 2)
408 printf ("%s\n", cfg->exception_message);
409 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
413 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
414 * foo<T> (int i) { ldarg.0; box T; }
/* Abort on unverifiable IL. Under gsharedvt this is downgraded to a
 * generic-sharing failure so the concrete instantiation is compiled instead. */
416 #define UNVERIFIED do { \
417 if (cfg->gsharedvt) { \
418 if (cfg->verbose_level > 2) \
419 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
420 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
421 goto exception_exit; \
423 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IP. */
427 #define GET_BBLOCK(cfg,tblock,ip) do { \
428 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
429 NEW_BBLOCK (cfg, (tblock)); \
430 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
431 NEW_BBLOCK (cfg, (tblock)); \
432 (tblock)->cil_code = (ip); \
433 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm. */
437 #if defined(TARGET_X86) || defined(TARGET_AMD64)
438 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
439 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
440 (dest)->dreg = alloc_ireg_mp ((cfg)); \
441 (dest)->sreg1 = (sr1); \
442 (dest)->sreg2 = (sr2); \
443 (dest)->inst_imm = (imm); \
444 (dest)->backend.shift_amount = (shift); \
445 MONO_ADD_INS ((cfg)->cbb, (dest)); \
449 /* Emit conversions so both operands of a binary opcode are of the same type */
451 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
453 MonoInst *arg1 = *arg1_ref;
454 MonoInst *arg2 = *arg2_ref;
/* Case 1: one operand is R4 and the other R8 — widen the R4 side to R8. */
457 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
458 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
461 /* Mixing r4/r8 is allowed by the spec */
462 if (arg1->type == STACK_R4) {
463 int dreg = alloc_freg (cfg);
465 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
466 conv->type = STACK_R8;
470 if (arg2->type == STACK_R4) {
471 int dreg = alloc_freg (cfg);
473 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
474 conv->type = STACK_R8;
/* Case 2 (64-bit only): pointer-sized vs I4 operand — sign-extend the I4. */
480 #if SIZEOF_REGISTER == 8
481 /* FIXME: Need to add many more cases */
482 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
485 int dr = alloc_preg (cfg);
486 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
487 (ins)->sreg2 = widen->dreg;
/* Pop two values, emit a typed binary op, push the (possibly decomposed)
 * result back on the eval stack. */
492 #define ADD_BINOP(op) do { \
493 MONO_INST_NEW (cfg, ins, (op)); \
495 ins->sreg1 = sp [0]->dreg; \
496 ins->sreg2 = sp [1]->dreg; \
497 type_from_op (cfg, ins, sp [0], sp [1]); \
499 /* Have to insert a widening op */ \
500 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
501 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
502 MONO_ADD_INS ((cfg)->cbb, (ins)); \
503 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one value, emit a typed unary op, push the result. */
506 #define ADD_UNOP(op) do { \
507 MONO_INST_NEW (cfg, ins, (op)); \
509 ins->sreg1 = sp [0]->dreg; \
510 type_from_op (cfg, ins, sp [0], NULL); \
512 (ins)->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type); \
513 MONO_ADD_INS ((cfg)->cbb, (ins)); \
514 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch, wiring up the true/false target
 * blocks and flushing the eval stack across the block boundary. */
517 #define ADD_BINCOND(next_block) do { \
520 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
521 cmp->sreg1 = sp [0]->dreg; \
522 cmp->sreg2 = sp [1]->dreg; \
523 type_from_op (cfg, cmp, sp [0], sp [1]); \
525 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
526 type_from_op (cfg, ins, sp [0], sp [1]); \
527 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
528 GET_BBLOCK (cfg, tblock, target); \
529 link_bblock (cfg, cfg->cbb, tblock); \
530 ins->inst_true_bb = tblock; \
531 if ((next_block)) { \
532 link_bblock (cfg, cfg->cbb, (next_block)); \
533 ins->inst_false_bb = (next_block); \
534 start_new_bblock = 1; \
536 GET_BBLOCK (cfg, tblock, ip); \
537 link_bblock (cfg, cfg->cbb, tblock); \
538 ins->inst_false_bb = tblock; \
539 start_new_bblock = 2; \
541 if (sp != stack_start) { \
542 handle_stack_args (cfg, stack_start, sp - stack_start); \
543 CHECK_UNVERIFIABLE (cfg); \
545 MONO_ADD_INS (cfg->cbb, cmp); \
546 MONO_ADD_INS (cfg->cbb, ins); \
550 * link_bblock: Links two basic blocks
552 * links two basic blocks in the control flow graph, the 'from'
553 * argument is the starting block and the 'to' argument is the block
554 * the control flow ends to after 'from'.
557 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
559 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added; entry/exit blocks have no cil_code. */
563 if (from->cil_code) {
565 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
567 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
570 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
572 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists, otherwise grow out_bb by one. */
577 for (i = 0; i < from->out_count; ++i) {
578 if (to == from->out_bb [i]) {
584 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
585 for (i = 0; i < from->out_count; ++i) {
586 newa [i] = from->out_bb [i];
/* Mirror the same logic for the in-edge on 'to'. */
594 for (i = 0; i < to->in_count; ++i) {
595 if (from == to->in_bb [i]) {
601 newa = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
602 for (i = 0; i < to->in_count; ++i) {
603 newa [i] = to->in_bb [i];
/* Public wrapper for the static link_bblock () above. */
612 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
614 link_bblock (cfg, from, to);
618 * mono_find_block_region:
620 * We mark each basic block with a region ID. We use that to avoid BB
621 * optimizations when blocks are in different regions.
624 * A region token that encodes where this region is, and information
625 * about the clause owner for this block.
627 * The region encodes the try/catch/filter clause that owns this block
628 * as well as the type. -1 is a special value that represents a block
629 * that is in none of try/catch/filter.
632 mono_find_block_region (MonoCompile *cfg, int offset)
634 MonoMethodHeader *header = cfg->header;
635 MonoExceptionClause *clause;
/* First pass: is OFFSET inside a filter expression or a handler body?
 * The token is ((clause index + 1) << 8) | region kind | clause flags. */
638 for (i = 0; i < header->num_clauses; ++i) {
639 clause = &header->clauses [i];
640 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
641 (offset < (clause->handler_offset)))
642 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
644 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
645 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
646 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
647 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
648 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
650 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: is OFFSET inside a protected (try) range? */
653 for (i = 0; i < header->num_clauses; ++i) {
654 clause = &header->clauses [i];
656 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
657 return ((i + 1) << 8) | clause->flags;
/* Return whether the IL offset OFFSET lies inside the handler body of a
 * finally or fault clause of the method being compiled. */
664 ip_in_finally_clause (MonoCompile *cfg, int offset)
666 MonoMethodHeader *header = cfg->header;
667 MonoExceptionClause *clause;
670 for (i = 0; i < header->num_clauses; ++i) {
671 clause = &header->clauses [i];
/* Only finally/fault clauses are of interest here. */
672 if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
675 if (MONO_OFFSET_IN_HANDLER (clause, offset))
/* Collect the exception clauses of kind TYPE whose protected range contains
 * IP but not TARGET — i.e. the clauses a branch from IP to TARGET leaves.
 * Returns a GList of MonoExceptionClause*. */
682 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
684 MonoMethodHeader *header = cfg->header;
685 MonoExceptionClause *clause;
689 for (i = 0; i < header->num_clauses; ++i) {
690 clause = &header->clauses [i];
691 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
692 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
693 if (clause->flags == type)
694 res = g_list_append (res, clause);
/* Get or create the stack-pointer variable associated with an EH region,
 * cached in cfg->spvars keyed by region token. */
701 mono_create_spvar_for_region (MonoCompile *cfg, int region)
705 var = (MonoInst *)g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
709 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
710 /* prevent it from being register allocated */
711 var->flags |= MONO_INST_VOLATILE;
713 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler at OFFSET, if any. */
717 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
719 return (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or create the exception-object variable for a handler at OFFSET,
 * cached in cfg->exvars. */
723 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
727 var = (MonoInst *)g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
731 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
732 /* prevent it from being register allocated */
733 var->flags |= MONO_INST_VOLATILE;
735 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
741 * Returns the type used in the eval stack when @type is loaded.
742 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
745 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
749 type = mini_get_underlying_type (type);
750 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack. */
752 inst->type = STACK_MP;
757 switch (type->type) {
759 inst->type = STACK_INV;
767 inst->type = STACK_I4;
772 case MONO_TYPE_FNPTR:
773 inst->type = STACK_PTR;
775 case MONO_TYPE_CLASS:
776 case MONO_TYPE_STRING:
777 case MONO_TYPE_OBJECT:
778 case MONO_TYPE_SZARRAY:
779 case MONO_TYPE_ARRAY:
780 inst->type = STACK_OBJ;
784 inst->type = STACK_I8;
/* R4 is either STACK_R4 or STACK_R8 depending on cfg->r4fp. */
787 inst->type = cfg->r4_stack_type;
790 inst->type = STACK_R8;
792 case MONO_TYPE_VALUETYPE:
793 if (type->data.klass->enumtype) {
/* Enums load as their underlying primitive type. */
794 type = mono_class_enum_basetype (type->data.klass);
798 inst->type = STACK_VTYPE;
801 case MONO_TYPE_TYPEDBYREF:
802 inst->klass = mono_defaults.typed_reference_class;
803 inst->type = STACK_VTYPE;
805 case MONO_TYPE_GENERICINST:
806 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only reachable under generic sharing. */
810 g_assert (cfg->gshared);
811 if (mini_is_gsharedvt_type (type)) {
812 g_assert (cfg->gsharedvt);
813 inst->type = STACK_VTYPE;
815 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
819 g_error ("unknown type 0x%02x in eval stack type", type->type);
824 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type for numeric binops, indexed [src1->type][src2->type].
 * Rows shorter than STACK_MAX are zero-filled by C aggregate init —
 * presumably 0 == STACK_INV; confirm against the enum in mini.h. */
827 bin_num_table [STACK_MAX] [STACK_MAX] = {
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
833 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of a unary negate, indexed by operand stack type. */
841 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
844 /* reduce the size of this table */
/* Result type for integer-only binops (and/or/xor and friends). */
846 bin_int_table [STACK_MAX] [STACK_MAX] = {
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
848 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
849 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
850 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
851 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
852 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
853 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
854 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity table: 0 = invalid pair, non-zero encodes allowed
 * comparison classes (see uses in type_from_op ()). */
858 bin_comp_table [STACK_MAX] [STACK_MAX] = {
859 /* Inv i L p F & O vt r4 */
861 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
862 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
863 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
864 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
865 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
866 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
867 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
868 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
871 /* reduce the size of this table */
/* Result type for shift operations: value type x shift-amount type. */
873 shift_table [STACK_MAX] [STACK_MAX] = {
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
875 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
876 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
877 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
878 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
879 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
880 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
881 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
885 * Tables to map from the non-specific opcode to the matching
886 * type-specific opcode.
888 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic CEE_* opcode to obtain the
 * typed OP_* opcode, indexed by operand stack type. */
890 binops_op_map [STACK_MAX] = {
891 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
894 /* handles from CEE_NEG to CEE_CONV_U8 */
896 unops_op_map [STACK_MAX] = {
897 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
900 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
902 ovfops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
906 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
908 ovf2ops_op_map [STACK_MAX] = {
909 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
912 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
914 ovf3ops_op_map [STACK_MAX] = {
915 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
918 /* handles from CEE_BEQ to CEE_BLT_UN */
920 beqops_op_map [STACK_MAX] = {
921 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
924 /* handles from CEE_CEQ to CEE_CLT_UN */
926 ceqops_op_map [STACK_MAX] = {
927 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
931 * Sets ins->type (the type on the eval stack) according to the
932 * type of the opcode and the arguments to it.
933 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
935 * FIXME: this function sets ins->type unconditionally in some cases, but
936 * it should set it to invalid for some types (a conv.x on an object)
939 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
941 switch (ins->opcode) {
/* Numeric binops: look up result type, then specialize the opcode. */
948 /* FIXME: check unverifiable args for STACK_MP */
949 ins->type = bin_num_table [src1->type] [src2->type];
950 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor). */
957 ins->type = bin_int_table [src1->type] [src2->type];
958 ins->opcode += binops_op_map [ins->type];
/* Shifts: amount operand may be I4 or native int. */
963 ins->type = shift_table [src1->type] [src2->type];
964 ins->opcode += binops_op_map [ins->type];
/* Compares: pick the L/R/F/I variant from the first operand's type. */
969 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
970 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
971 ins->opcode = OP_LCOMPARE;
972 else if (src1->type == STACK_R4)
973 ins->opcode = OP_RCOMPARE;
974 else if (src1->type == STACK_R8)
975 ins->opcode = OP_FCOMPARE;
977 ins->opcode = OP_ICOMPARE;
979 case OP_ICOMPARE_IMM:
980 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
981 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
982 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq..blt.un). */
994 ins->opcode += beqops_op_map [src1->type];
997 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
998 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt family: only bit 0 of the comp table marks a valid ordered compare. */
1004 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
1005 ins->opcode += ceqops_op_map [src1->type];
/* Unary neg. */
1009 ins->type = neg_table [src1->type];
1010 ins->opcode += unops_op_map [ins->type];
/* not: integer-like operands only. */
1013 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1014 ins->type = src1->type;
1016 ins->type = STACK_INV;
1017 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to i1/u1/i2/u2/i4/u4 produce an I4. */
1023 ins->type = STACK_I4;
1024 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
1027 ins->type = STACK_R8;
1028 switch (src1->type) {
1031 ins->opcode = OP_ICONV_TO_R_UN;
1034 ins->opcode = OP_LCONV_TO_R_UN;
1038 case CEE_CONV_OVF_I1:
1039 case CEE_CONV_OVF_U1:
1040 case CEE_CONV_OVF_I2:
1041 case CEE_CONV_OVF_U2:
1042 case CEE_CONV_OVF_I4:
1043 case CEE_CONV_OVF_U4:
1044 ins->type = STACK_I4;
1045 ins->opcode += ovf3ops_op_map [src1->type];
1047 case CEE_CONV_OVF_I_UN:
1048 case CEE_CONV_OVF_U_UN:
1049 ins->type = STACK_PTR;
1050 ins->opcode += ovf2ops_op_map [src1->type];
1052 case CEE_CONV_OVF_I1_UN:
1053 case CEE_CONV_OVF_I2_UN:
1054 case CEE_CONV_OVF_I4_UN:
1055 case CEE_CONV_OVF_U1_UN:
1056 case CEE_CONV_OVF_U2_UN:
1057 case CEE_CONV_OVF_U4_UN:
1058 ins->type = STACK_I4;
1059 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is native unsigned int; opcode depends on source width. */
1062 ins->type = STACK_PTR;
1063 switch (src1->type) {
1065 ins->opcode = OP_ICONV_TO_U;
1069 #if SIZEOF_VOID_P == 8
1070 ins->opcode = OP_LCONV_TO_U;
1072 ins->opcode = OP_MOVE;
1076 ins->opcode = OP_LCONV_TO_U;
1079 ins->opcode = OP_FCONV_TO_U;
/* Conversions to i8/u8. */
1085 ins->type = STACK_I8;
1086 ins->opcode += unops_op_map [src1->type];
1088 case CEE_CONV_OVF_I8:
1089 case CEE_CONV_OVF_U8:
1090 ins->type = STACK_I8;
1091 ins->opcode += ovf3ops_op_map [src1->type];
1093 case CEE_CONV_OVF_U8_UN:
1094 case CEE_CONV_OVF_I8_UN:
1095 ins->type = STACK_I8;
1096 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
1099 ins->type = cfg->r4_stack_type;
1100 ins->opcode += unops_op_map [src1->type];
1103 ins->type = STACK_R8;
1104 ins->opcode += unops_op_map [src1->type];
1107 ins->type = STACK_R8;
/* Overflow-checked narrowing family. */
1111 ins->type = STACK_I4;
1112 ins->opcode += ovfops_op_map [src1->type];
1115 case CEE_CONV_OVF_I:
1116 case CEE_CONV_OVF_U:
1117 ins->type = STACK_PTR;
1118 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic; not defined for floats. */
1121 case CEE_ADD_OVF_UN:
1123 case CEE_MUL_OVF_UN:
1125 case CEE_SUB_OVF_UN:
1126 ins->type = bin_num_table [src1->type] [src2->type];
1127 ins->opcode += ovfops_op_map [src1->type];
1128 if (ins->type == STACK_R8)
1129 ins->type = STACK_INV;
/* Memory loads: result type follows the load width. */
1131 case OP_LOAD_MEMBASE:
1132 ins->type = STACK_PTR;
1134 case OP_LOADI1_MEMBASE:
1135 case OP_LOADU1_MEMBASE:
1136 case OP_LOADI2_MEMBASE:
1137 case OP_LOADU2_MEMBASE:
1138 case OP_LOADI4_MEMBASE:
1139 case OP_LOADU4_MEMBASE:
1140 ins->type = STACK_PTR;
1142 case OP_LOADI8_MEMBASE:
1143 ins->type = STACK_I8;
1145 case OP_LOADR4_MEMBASE:
1146 ins->type = cfg->r4_stack_type;
1148 case OP_LOADR8_MEMBASE:
1149 ins->type = STACK_R8;
1152 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers on the stack get object as a placeholder klass. */
1156 if (ins->type == STACK_MP)
1157 ins->klass = mono_defaults.object_class;
/* Stack type produced by each ldind.* variant, in opcode order. */
1162 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Currently unused validity table for argument passing (see FIXME below). */
1168 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the eval-stack values in ARGS are compatible with SIG
 * (and THIS_INS for instance calls); returns 0 on mismatch. */
1173 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1178 switch (args->type) {
1188 for (i = 0; i < sig->param_count; ++i) {
1189 switch (args [i].type) {
/* Managed-pointer argument must match a byref parameter. */
1193 if (!sig->params [i]->byref)
1197 if (sig->params [i]->byref)
1199 switch (sig->params [i]->type) {
1200 case MONO_TYPE_CLASS:
1201 case MONO_TYPE_STRING:
1202 case MONO_TYPE_OBJECT:
1203 case MONO_TYPE_SZARRAY:
1204 case MONO_TYPE_ARRAY:
/* Float argument must match an R4/R8 parameter. */
1211 if (sig->params [i]->byref)
1213 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1222 /*if (!param_table [args [i].type] [sig->params [i]->type])
1230 * When we need a pointer to the current domain many times in a method, we
1231 * call mono_domain_get() once and we store the result in a local variable.
1232 * This function returns the variable that represents the MonoDomain*.
1234 inline static MonoInst *
1235 mono_get_domainvar (MonoCompile *cfg)
1237 if (!cfg->domainvar)
1238 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1239 return cfg->domainvar;
1243 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful for AOT on backends that
 * need an explicit got_var. */
1247 mono_get_got_var (MonoCompile *cfg)
1249 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1251 if (!cfg->got_var) {
1252 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1254 return cfg->got_var;
/* Lazily create the variable holding the runtime generic context / vtable;
 * only valid under generic sharing. */
1258 mono_get_vtable_var (MonoCompile *cfg)
1260 g_assert (cfg->gshared);
1262 if (!cfg->rgctx_var) {
1263 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1264 /* force the var to be stack allocated */
1265 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1268 return cfg->rgctx_var;
/* Map an eval-stack type back to a representative MonoType*. */
1272 type_from_stack_type (MonoInst *ins) {
1273 switch (ins->type) {
1274 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1275 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1276 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1277 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1278 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers use the instruction's klass for precision. */
1280 return &ins->klass->this_arg;
1281 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1282 case STACK_VTYPE: return &ins->klass->byval_arg;
1284 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Map a MonoType* to the evaluation-stack type (STACK_*) it occupies;
 *   the rough inverse of type_from_stack_type (). Enums/typedefs are
 *   lowered first via mono_type_get_underlying_type ().
 */
1289 static G_GNUC_UNUSED int
1290 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1292 t = mono_type_get_underlying_type (t);
1304 case MONO_TYPE_FNPTR:
1306 case MONO_TYPE_CLASS:
1307 case MONO_TYPE_STRING:
1308 case MONO_TYPE_OBJECT:
1309 case MONO_TYPE_SZARRAY:
1310 case MONO_TYPE_ARRAY:
/* R4 maps to a backend-dependent stack type (STACK_R4 or STACK_R8). */
1316 return cfg->r4_stack_type;
1319 case MONO_TYPE_VALUETYPE:
1320 case MONO_TYPE_TYPEDBYREF:
1322 case MONO_TYPE_GENERICINST:
1323 if (mono_type_generic_inst_is_valuetype (t))
1329 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Return the element MonoClass accessed by a CEE_LDELEM_* / CEE_STELEM_*
 *   style opcode (the case labels for most entries are elided in this
 *   view). Aborts on opcodes that are not array element accesses.
 */
1336 array_access_to_klass (int opcode)
1340 return mono_defaults.byte_class;
1342 return mono_defaults.uint16_class;
1345 return mono_defaults.int_class;
1348 return mono_defaults.sbyte_class;
1351 return mono_defaults.int16_class;
1354 return mono_defaults.int32_class;
1356 return mono_defaults.uint32_class;
1359 return mono_defaults.int64_class;
1362 return mono_defaults.single_class;
1365 return mono_defaults.double_class;
1366 case CEE_LDELEM_REF:
1367 case CEE_STELEM_REF:
1368 return mono_defaults.object_class;
1370 g_assert_not_reached ();
1376 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return (or create) the local variable used to carry stack slot SLOT
 *   of type INS->type across basic-block boundaries. Some stack types are
 *   cached per (type, slot) in cfg->intvars so join points reuse the same
 *   variable; other types always get a fresh variable.
 */
1379 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1384 /* inlining can result in deeper stacks */
1385 if (slot >= cfg->header->max_stack)
1386 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache: one entry per stack type per slot. */
1388 pos = ins->type - 1 + slot * STACK_MAX;
1390 switch (ins->type) {
1397 if ((vnum = cfg->intvars [pos]))
1398 return cfg->varinfo [vnum];
1399 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1400 cfg->intvars [pos] = res->inst_c0;
/* Non-cached stack types: always create a fresh variable. */
1403 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   Under AOT, record the (image, token) pair for KEY in
 *   cfg->token_info_hash so the AOT compiler can later resolve KEY back
 *   to a metadata token.
 */
1409 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1412 * Don't use this if a generic_context is set, since that means AOT can't
1413 * look up the method using just the image+token.
1414 * table == 0 means this is a reference made from a wrapper.
1416 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1417 MonoJumpInfoToken *jump_info_token = (MonoJumpInfoToken *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1418 jump_info_token->image = image;
1419 jump_info_token->token = token;
1420 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1425 * This function is called to handle items that are left on the evaluation stack
1426 * at basic block boundaries. What happens is that we save the values to local variables
1427 * and we reload them later when first entering the target basic block (with the
1428 * handle_loaded_temps () function).
1429 * A single joint point will use the same variables (stored in the array bb->out_stack or
1430 * bb->in_stack, if the basic block is before or after the joint point).
1432 * This function needs to be called _before_ emitting the last instruction of
1433 * the bb (i.e. before emitting a branch).
1434 * If the stack merge fails at a join point, cfg->unverifiable is set.
1437 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1440 MonoBasicBlock *bb = cfg->cbb;
1441 MonoBasicBlock *outb;
1442 MonoInst *inst, **locals;
1447 if (cfg->verbose_level > 3)
1448 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick the variables that will carry the stack. */
1449 if (!bb->out_scount) {
1450 bb->out_scount = count;
1451 //printf ("bblock %d has out:", bb->block_num);
1453 for (i = 0; i < bb->out_count; ++i) {
1454 outb = bb->out_bb [i];
1455 /* exception handlers are linked, but they should not be considered for stack args */
1456 if (outb->flags & BB_EXCEPTION_HANDLER)
1458 //printf (" %d", outb->block_num);
/* If a successor already has an in_stack, share it as our out_stack. */
1459 if (outb->in_stack) {
1461 bb->out_stack = outb->in_stack;
1467 bb->out_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1468 for (i = 0; i < count; ++i) {
1470 * try to reuse temps already allocated for this purpouse, if they occupy the same
1471 * stack slot and if they are of the same type.
1472 * This won't cause conflicts since if 'local' is used to
1473 * store one of the values in the in_stack of a bblock, then
1474 * the same variable will be used for the same outgoing stack
1476 * This doesn't work when inlining methods, since the bblocks
1477 * in the inlined methods do not inherit their in_stack from
1478 * the bblock they are inlined to. See bug #58863 for an
1481 if (cfg->inlined_method)
1482 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1484 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to each successor's in_stack. */
1489 for (i = 0; i < bb->out_count; ++i) {
1490 outb = bb->out_bb [i];
1491 /* exception handlers are linked, but they should not be considered for stack args */
1492 if (outb->flags & BB_EXCEPTION_HANDLER)
1494 if (outb->in_scount) {
1495 if (outb->in_scount != bb->out_scount) {
/* Stack-depth mismatch at a join point: mark the method unverifiable. */
1496 cfg->unverifiable = TRUE;
1499 continue; /* check they are the same locals */
1501 outb->in_scount = count;
1502 outb->in_stack = bb->out_stack;
1505 locals = bb->out_stack;
/* Store each live stack item into its carrier variable and replace it on sp. */
1507 for (i = 0; i < count; ++i) {
1508 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1509 inst->cil_code = sp [i]->cil_code;
1510 sp [i] = locals [i];
1511 if (cfg->verbose_level > 3)
1512 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1516 * It is possible that the out bblocks already have in_stack assigned, and
1517 * the in_stacks differ. In this case, we will store to all the different
1524 /* Find a bblock which has a different in_stack */
1526 while (bindex < bb->out_count) {
1527 outb = bb->out_bb [bindex];
1528 /* exception handlers are linked, but they should not be considered for stack args */
1529 if (outb->flags & BB_EXCEPTION_HANDLER) {
1533 if (outb->in_stack != locals) {
/* Successor uses a different variable set: emit stores into that set too. */
1534 for (i = 0; i < count; ++i) {
1535 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1536 inst->cil_code = sp [i]->cil_code;
1537 sp [i] = locals [i];
1538 if (cfg->verbose_level > 3)
1539 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1541 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 *   Emit IR loading the runtime constant described by (PATCH_TYPE, DATA).
 *   Under AOT this is a patchable AOTCONST; otherwise the patch target is
 *   resolved immediately and emitted as a plain pointer constant.
 */
1551 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1555 if (cfg->compile_aot) {
1556 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
1562 ji.type = patch_type;
1563 ji.data.target = data;
1564 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE, &error);
1565 mono_error_assert_ok (&error);
1567 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that sets INTF_BIT_REG nonzero iff the interface bitmap found
 *   at [BASE_REG + OFFSET] has the bit for KLASS's interface id set.
 *   Three code paths: an icall when the bitmap is compressed, an IR
 *   bit-test sequence under AOT (iid only known at runtime), and a direct
 *   constant byte test in the plain JIT case.
 */
1573 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1575 int ibitmap_reg = alloc_preg (cfg);
1576 #ifdef COMPRESSED_INTERFACE_BITMAP
1578 MonoInst *res, *ins;
1579 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1580 MONO_ADD_INS (cfg->cbb, ins);
1582 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1583 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1584 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1586 int ibitmap_byte_reg = alloc_preg (cfg);
1588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1590 if (cfg->compile_aot) {
/* AOT: iid is a runtime patch, so compute byte index (iid >> 3) and mask (1 << (iid & 7)) in IR. */
1591 int iid_reg = alloc_preg (cfg);
1592 int shifted_iid_reg = alloc_preg (cfg);
1593 int ibitmap_byte_address_reg = alloc_preg (cfg);
1594 int masked_iid_reg = alloc_preg (cfg);
1595 int iid_one_bit_reg = alloc_preg (cfg);
1596 int iid_bit_reg = alloc_preg (cfg);
1597 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1600 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1602 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1603 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1604 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is a compile-time constant, so test the byte directly. */
1606 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1613 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1614 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper over mini_emit_interface_bitmap_check using the MonoClass bitmap offset. */
1617 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1619 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1623 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1624 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper over mini_emit_interface_bitmap_check using the MonoVTable bitmap offset. */
1627 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1629 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1633 * Emit code which checks whenever the interface id of @klass is smaller than
1634 * than the value given by max_iid_reg.
/*
 * On failure: branches to FALSE_TARGET when one is given, otherwise
 * throws InvalidCastException. Under AOT the iid comes from a patchable
 * constant instead of an immediate.
 */
1637 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1638 MonoBasicBlock *false_target)
1640 if (cfg->compile_aot) {
1641 int iid_reg = alloc_preg (cfg);
1642 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1643 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1648 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1650 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1653 /* Same as above, but obtains max_iid from a vtable */
1655 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1656 MonoBasicBlock *false_target)
1658 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit, zero-extended) then delegate. */
1660 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1661 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1664 /* Same as above, but obtains max_iid from a klass */
1666 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1667 MonoBasicBlock *false_target)
1669 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit, zero-extended) then delegate. */
1671 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1672 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subclass test using the supertypes table of the
 *   class in KLASS_REG: load supertypes [klass->idepth - 1] and compare it
 *   against KLASS (taken from KLASS_INS when given, a class constant under
 *   AOT, or an immediate otherwise). Branches to TRUE_TARGET on a match;
 *   FALSE_TARGET is taken when the idepth guard fails (only emitted when
 *   klass->idepth exceeds MONO_DEFAULT_SUPERTABLE_SIZE).
 */
1676 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1678 int idepth_reg = alloc_preg (cfg);
1679 int stypes_reg = alloc_preg (cfg);
1680 int stype = alloc_preg (cfg);
1682 mono_class_setup_supertypes (klass);
1684 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1685 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1687 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1692 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1693 } else if (cfg->compile_aot) {
1694 int const_reg = alloc_preg (cfg);
1695 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1696 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1698 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without an explicit klass instruction. */
1704 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1706 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against the vtable in VTABLE_REG:
 *   max-iid guard, then interface-bitmap test. On a set bit it branches
 *   to TRUE_TARGET when given; otherwise throws InvalidCastException on
 *   a clear bit.
 */
1710 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1712 int intf_reg = alloc_preg (cfg);
1714 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1715 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1716 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1720 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1724 * Variant of the above that takes a register to the class, not the vtable.
1727 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1729 int intf_bit_reg = alloc_preg (cfg);
/* Same shape as mini_emit_iface_cast: max-iid guard, bitmap test, branch or throw. */
1731 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1732 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1735 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1737 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-identity check: compare KLASS_REG against KLASS
 *   (from KLASS_INST when given, else a runtime constant) and throw
 *   InvalidCastException on mismatch.
 */
1741 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1744 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1746 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1747 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1749 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit klass instruction. */
1753 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1755 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare KLASS_REG against KLASS (class constant under AOT, immediate
 *   otherwise) and branch to TARGET with the given BRANCH_OP.
 */
1759 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1761 if (cfg->compile_aot) {
1762 int const_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1764 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1766 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1772 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check for the object in OBJ_REG whose class is in
 *   KLASS_REG, throwing InvalidCastException on failure. The visible
 *   branches handle array element-type checks (rank + cast_class, with
 *   special cases around enums and System.Object elements) and, further
 *   down, a supertypes-table subclass check mirroring
 *   mini_emit_isninst_cast_inst. OBJ_REG == -1 skips the SZARRAY bounds
 *   check (used for arrays of arrays; see the comment at the call site).
 */
1775 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1778 int rank_reg = alloc_preg (cfg);
1779 int eclass_reg = alloc_preg (cfg);
1781 g_assert (!klass_inst);
/* Array path: the object's rank must match exactly. */
1782 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1783 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1784 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1785 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1786 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1787 if (klass->cast_class == mono_defaults.object_class) {
1788 int parent_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1790 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1791 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1792 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1793 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1794 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1795 } else if (klass->cast_class == mono_defaults.enum_class) {
1796 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1797 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1798 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1800 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1801 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1804 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1805 /* Check that the object is a vector too */
1806 int bounds_reg = alloc_preg (cfg);
1807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1809 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subclass check, throwing on failure. */
1812 int idepth_reg = alloc_preg (cfg);
1813 int stypes_reg = alloc_preg (cfg);
1814 int stype = alloc_preg (cfg);
1816 mono_class_setup_supertypes (klass);
1818 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1820 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1821 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1824 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1825 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without an explicit klass instruction. */
1830 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1832 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit IR that sets SIZE bytes at [DESTREG + OFFSET] to VAL (only
 *   VAL == 0 is supported, asserted below), honoring ALIGN. Small aligned
 *   sizes use immediate stores; larger regions store from a zero register
 *   in descending widths (8/4/2/1 bytes), widest first where alignment
 *   permits.
 */
1836 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1840 g_assert (val == 0);
1845 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1848 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1851 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1854 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1856 #if SIZEOF_REGISTER == 8
1858 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize the (zero) value in a register once. */
1864 val_reg = alloc_preg (cfg);
1866 if (SIZEOF_REGISTER == 8)
1867 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1869 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1872 /* This could be optimized further if neccesary */
1874 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1881 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1895 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1905 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying SIZE bytes from [SRCREG + SOFFSET] to
 *   [DESTREG + DOFFSET], honoring ALIGN: load/store pairs of descending
 *   width (8/4/2/1 bytes), widest first where alignment permits. Size is
 *   capped (asserted) to bound code expansion.
 */
1912 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1919 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1920 g_assert (size < 10000);
1923 /* This could be optimized further if neccesary */
1925 cur_reg = alloc_preg (cfg);
1926 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1934 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1936 cur_reg = alloc_preg (cfg);
1937 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1938 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1946 cur_reg = alloc_preg (cfg);
1947 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1948 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1954 cur_reg = alloc_preg (cfg);
1955 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1956 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1962 cur_reg = alloc_preg (cfg);
1963 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1964 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR storing SREG1 into the TLS slot identified by TLS_KEY.
 *   Under AOT the offset is a patchable constant consumed by
 *   OP_TLS_SET_REG; the JIT path uses OP_TLS_SET with an immediate offset.
 */
1972 emit_tls_set (MonoCompile *cfg, int sreg1, MonoTlsKey tls_key)
1976 if (cfg->compile_aot) {
1977 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1978 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1980 ins->sreg2 = c->dreg;
1981 MONO_ADD_INS (cfg->cbb, ins);
1983 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1985 ins->inst_offset = mini_get_tls_offset (tls_key);
1986 MONO_ADD_INS (cfg->cbb, ins);
1993 * Emit IR to push the current LMF onto the LMF stack.
1996 emit_push_lmf (MonoCompile *cfg)
1999 * Emit IR to push the LMF:
2000 * lmf_addr = <lmf_addr from tls>
2001 * lmf->lmf_addr = lmf_addr
2002 * lmf->prev_lmf = *lmf_addr
2005 int lmf_reg, prev_lmf_reg;
2006 MonoInst *ins, *lmf_ins;
/* Fast path: LMF is reachable directly through TLS. */
2011 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2012 /* Load current lmf */
2013 lmf_ins = mono_get_lmf_intrinsic (cfg);
2015 MONO_ADD_INS (cfg->cbb, lmf_ins);
2016 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2017 lmf_reg = ins->dreg;
2018 /* Save previous_lmf */
2019 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Set new LMF */
2021 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2024 * Store lmf_addr in a variable, so it can be allocated to a global register.
2026 if (!cfg->lmf_addr_var)
2027 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr: via jit_tls intrinsic when available... */
2030 ins = mono_get_jit_tls_intrinsic (cfg);
2032 int jit_tls_dreg = ins->dreg;
2034 MONO_ADD_INS (cfg->cbb, ins);
2035 lmf_reg = alloc_preg (cfg);
2036 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* ...or via the mono_get_lmf_addr icall / intrinsic fallbacks. */
2038 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2041 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2043 MONO_ADD_INS (cfg->cbb, lmf_ins);
2046 MonoInst *args [16], *jit_tls_ins, *ins;
2048 /* Inline mono_get_lmf_addr () */
2049 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2051 /* Load mono_jit_tls_id */
2052 if (cfg->compile_aot)
2053 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2055 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2056 /* call pthread_getspecific () */
2057 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2058 /* lmf_addr = &jit_tls->lmf */
2059 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2062 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2066 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2068 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2069 lmf_reg = ins->dreg;
2071 prev_lmf_reg = alloc_preg (cfg);
2072 /* Save previous_lmf */
2073 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2074 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link the new LMF at the head of the list: *lmf_addr = lmf */
2076 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2083 * Emit IR to pop the current LMF from the LMF stack.
2086 emit_pop_lmf (MonoCompile *cfg)
2088 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2094 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2095 lmf_reg = ins->dreg;
/* Fast path mirrors emit_push_lmf: restore previous_lmf straight into TLS. */
2097 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2098 /* Load previous_lmf */
2099 prev_lmf_reg = alloc_preg (cfg);
2100 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Set new LMF */
2102 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2105 * Emit IR to pop the LMF:
2106 * *(lmf->lmf_addr) = lmf->prev_lmf
2108 /* This could be called before emit_push_lmf () */
2109 if (!cfg->lmf_addr_var)
2110 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2111 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2113 prev_lmf_reg = alloc_preg (cfg);
2114 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2115 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *   Emit a profiler enter/leave instrumentation icall to FUNC with the
 *   current method as its argument, guarded by MONO_PROFILE_ENTER_LEAVE.
 *   Skipped for inlined methods to keep profiling results accurate.
 */
2120 emit_instrumentation_call (MonoCompile *cfg, void *func)
2122 MonoInst *iargs [1];
2125 * Avoid instrumenting inlined methods since it can
2126 * distort profiling results.
2128 if (cfg->method != cfg->current_method)
2131 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2132 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2133 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Select the call IR opcode for a given return type: the CALLI variant
 *   (OP_*CALL_REG) when CALLI is set, the virtual variant
 *   (OP_*CALL_MEMBASE) when VIRT is set, plain OP_*CALL otherwise.
 *   Enums and generic instances are lowered to their underlying type
 *   before dispatch.
 */
2138 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2141 type = mini_get_underlying_type (type);
2142 switch (type->type) {
2143 case MONO_TYPE_VOID:
2144 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2151 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2155 case MONO_TYPE_FNPTR:
2156 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2157 case MONO_TYPE_CLASS:
2158 case MONO_TYPE_STRING:
2159 case MONO_TYPE_OBJECT:
2160 case MONO_TYPE_SZARRAY:
2161 case MONO_TYPE_ARRAY:
2162 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2165 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2168 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2170 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2172 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2173 case MONO_TYPE_VALUETYPE:
2174 if (type->data.klass->enumtype) {
/* Enum: retry with its base type (loop/goto elided in this view). */
2175 type = mono_class_enum_basetype (type->data.klass);
2178 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2179 case MONO_TYPE_TYPEDBYREF:
2180 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2181 case MONO_TYPE_GENERICINST:
2182 type = &type->data.generic_class->container_class->byval_arg;
2185 case MONO_TYPE_MVAR:
/* gsharedvt: type variables are returned like value types. */
2187 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2189 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2194 //XXX this ignores if t is byref
2195 #define MONO_TYPE_IS_PRIMITIVE_SCALAR(t) ((((((t)->type >= MONO_TYPE_BOOLEAN && (t)->type <= MONO_TYPE_U8) || ((t)->type >= MONO_TYPE_I && (t)->type <= MONO_TYPE_U)))))
2198 * target_type_is_incompatible:
2199 * @cfg: MonoCompile context
2201 * Check that the item @arg on the evaluation stack can be stored
2202 * in the target type (can be a local, or field, etc).
2203 * The cfg arg can be used to check if we need verification or just
2206 * Returns: non-0 value if arg can't be stored on a target.
2209 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2211 MonoType *simple_type;
/* Byref targets: only managed pointers (STACK_MP) or native pointers qualify. */
2214 if (target->byref) {
2215 /* FIXME: check that the pointed to types match */
2216 if (arg->type == STACK_MP) {
2217 /* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
2218 MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
2219 MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
2221 /* if the target is native int& or same type */
2222 if (target->type == MONO_TYPE_I || target_class_lowered == source_class_lowered)
2225 /* Both are primitive type byrefs and the source points to a larger type that the destination */
2226 if (MONO_TYPE_IS_PRIMITIVE_SCALAR (&target_class_lowered->byval_arg) && MONO_TYPE_IS_PRIMITIVE_SCALAR (&source_class_lowered->byval_arg) &&
2227 mono_class_instance_size (target_class_lowered) <= mono_class_instance_size (source_class_lowered))
2231 if (arg->type == STACK_PTR)
/* Non-byref targets: dispatch on the lowered target type. */
2236 simple_type = mini_get_underlying_type (target);
2237 switch (simple_type->type) {
2238 case MONO_TYPE_VOID:
2246 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2250 /* STACK_MP is needed when setting pinned locals */
2251 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2256 case MONO_TYPE_FNPTR:
2258 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2259 * in native int. (#688008).
2261 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2264 case MONO_TYPE_CLASS:
2265 case MONO_TYPE_STRING:
2266 case MONO_TYPE_OBJECT:
2267 case MONO_TYPE_SZARRAY:
2268 case MONO_TYPE_ARRAY:
2269 if (arg->type != STACK_OBJ)
2271 /* FIXME: check type compatibility */
2275 if (arg->type != STACK_I8)
2279 if (arg->type != cfg->r4_stack_type)
2283 if (arg->type != STACK_R8)
2286 case MONO_TYPE_VALUETYPE:
2287 if (arg->type != STACK_VTYPE)
2289 klass = mono_class_from_mono_type (simple_type);
2290 if (klass != arg->klass)
2293 case MONO_TYPE_TYPEDBYREF:
2294 if (arg->type != STACK_VTYPE)
2296 klass = mono_class_from_mono_type (simple_type);
2297 if (klass != arg->klass)
2300 case MONO_TYPE_GENERICINST:
2301 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2302 MonoClass *target_class;
2303 if (arg->type != STACK_VTYPE)
2305 klass = mono_class_from_mono_type (simple_type);
2306 target_class = mono_class_from_mono_type (target);
2307 /* The second cases is needed when doing partial sharing */
2308 if (klass != arg->klass && target_class != arg->klass && target_class != mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg)))
2312 if (arg->type != STACK_OBJ)
2314 /* FIXME: check type compatibility */
2318 case MONO_TYPE_MVAR:
/* Type variables only appear here under gshared; vt-ness decides the stack type. */
2319 g_assert (cfg->gshared);
2320 if (mini_type_var_is_vt (simple_type)) {
2321 if (arg->type != STACK_VTYPE)
2324 if (arg->type != STACK_OBJ)
2329 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2335 * Prepare arguments for passing to a function call.
2336 * Return a non-zero value if the arguments can't be passed to the given
2338 * The type checks are not yet complete and some conversions may need
2339 * casts on 32 or 64 bit architectures.
2341 * FIXME: implement this using target_type_is_incompatible ()
2344 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2346 MonoType *simple_type;
/* The receiver (when present) must be a reference or pointer. */
2350 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2354 for (i = 0; i < sig->param_count; ++i) {
2355 if (sig->params [i]->byref) {
2356 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2360 simple_type = mini_get_underlying_type (sig->params [i]);
/* Re-dispatched after enum / generic-inst lowering below. */
2362 switch (simple_type->type) {
2363 case MONO_TYPE_VOID:
2372 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2378 case MONO_TYPE_FNPTR:
2379 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2382 case MONO_TYPE_CLASS:
2383 case MONO_TYPE_STRING:
2384 case MONO_TYPE_OBJECT:
2385 case MONO_TYPE_SZARRAY:
2386 case MONO_TYPE_ARRAY:
2387 if (args [i]->type != STACK_OBJ)
2392 if (args [i]->type != STACK_I8)
2396 if (args [i]->type != cfg->r4_stack_type)
2400 if (args [i]->type != STACK_R8)
2403 case MONO_TYPE_VALUETYPE:
2404 if (simple_type->data.klass->enumtype) {
2405 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2408 if (args [i]->type != STACK_VTYPE)
2411 case MONO_TYPE_TYPEDBYREF:
2412 if (args [i]->type != STACK_VTYPE)
2415 case MONO_TYPE_GENERICINST:
2416 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2419 case MONO_TYPE_MVAR:
/* gsharedvt type variables are passed by value like vtypes. */
2421 if (args [i]->type != STACK_VTYPE)
2425 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map an OP_*CALL_MEMBASE (virtual call) opcode to the corresponding
 *   direct-call opcode (the return lines are elided in this view).
 *   Asserts on anything else.
 */
2433 callvirt_to_call (int opcode)
2436 case OP_CALL_MEMBASE:
2438 case OP_VOIDCALL_MEMBASE:
2440 case OP_FCALL_MEMBASE:
2442 case OP_RCALL_MEMBASE:
2444 case OP_VCALL_MEMBASE:
2446 case OP_LCALL_MEMBASE:
2449 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *
 *   Map an OP_*CALL_MEMBASE opcode to the corresponding OP_*CALL_REG
 * opcode (call through an address held in a register). Aborts on any
 * opcode outside the listed set.
 */
callvirt_to_call_reg (int opcode)
	case OP_CALL_MEMBASE:
	case OP_VOIDCALL_MEMBASE:
		return OP_VOIDCALL_REG;
	case OP_FCALL_MEMBASE:
		return OP_FCALL_REG;
	case OP_RCALL_MEMBASE:
		return OP_RCALL_REG;
	case OP_VCALL_MEMBASE:
		return OP_VCALL_REG;
	case OP_LCALL_MEMBASE:
		return OP_LCALL_REG;
		g_assert_not_reached ();
/* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Load the IMT argument (either the dynamic IMT_ARG instruction or a
 * runtime constant for METHOD) into a register and attach it to CALL as
 * an out-argument in MONO_ARCH_IMT_REG. The LLVM path additionally
 * records the register in call->imt_arg_reg.
 */
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
	if (COMPILE_LLVM (cfg)) {
		/* dynamic IMT argument: copy it into a fresh pointer register */
		method_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		/* otherwise materialize METHOD as a runtime constant */
		MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
		method_reg = ins->dreg;
		call->imt_arg_reg = method_reg;
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
	/* non-LLVM path: same dance without imt_arg_reg bookkeeping */
	method_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
	method_reg = ins->dreg;
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
static MonoJumpInfo *
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo from the mempool MP and fill in its
 * patch TYPE and TARGET data (ip/type assignments elided in this view).
 */
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
	MonoJumpInfo *ji = (MonoJumpInfo *)mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
	ji->data.target = target;
/* Return the generic-sharing context usage of KLASS (cfg-aware wrapper). */
mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
		return mono_class_check_context_used (klass);
/* Return the generic-sharing context usage of METHOD (cfg-aware wrapper). */
mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
		return mono_method_check_context_used (method);
/*
 * check_method_sharing:
 *
 *   Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
 * Results are written through OUT_PASS_VTABLE / OUT_PASS_MRGCTX (either may be NULL).
 */
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
	gboolean pass_vtable = FALSE;
	gboolean pass_mrgctx = FALSE;
	/* static or valuetype methods on generic classes may need the vtable */
	if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
			(cmethod->klass->generic_class || cmethod->klass->generic_container)) {
		gboolean sharable = FALSE;
		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
		/*
		 * Pass vtable iff target method might
		 * be shared, which means that sharing
		 * is enabled for its class and its
		 * context is sharable (and it's not a
		 */
		if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
	/* generic methods (method_inst != NULL) need the mrgctx instead */
	if (mini_method_get_context (cmethod) &&
			mini_method_get_context (cmethod)->method_inst) {
		g_assert (!pass_vtable);
		if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
			if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
	if (out_pass_vtable)
		*out_pass_vtable = pass_vtable;
	if (out_pass_mrgctx)
		*out_pass_mrgctx = pass_mrgctx;
inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *
 *   Create and arch-lower a MonoCallInst for SIG/ARGS. CALLI selects an
 * indirect call, VIRTUAL_ a virtual call, TAIL a tail call, RGCTX marks
 * an rgctx argument, UNBOX_TRAMPOLINE requests an unbox trampoline.
 * Handles vtype returns (via vret_var / OP_OUTARG_VTRETADDR) and the
 * soft-float r8->r4 argument conversion fallback.
 */
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual_, int tail, int rgctx, int unbox_trampoline)
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
		/* notify profiler of method leave before a tail call replaces the frame */
		emit_instrumentation_call (cfg, mono_profiler_method_leave);
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual_));
	call->signature = sig;
	call->rgctx_reg = rgctx;
	sig_ret = mini_get_underlying_type (sig->ret);
	type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
	/* NOTE(review): the guards distinguishing these two vtype-return branches
	 * are elided in this view; the conditions shown are not the full ones. */
	if (mini_type_is_vtype (sig_ret)) {
		call->vret_var = cfg->vret_addr;
		//g_assert_not_reached ();
	} else if (mini_type_is_vtype (sig_ret)) {
		MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
		temp->backend.is_pinvoke = sig->pinvoke;
		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);
		call->inst.dreg = temp->dreg;
		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig_ret))
		call->inst.dreg = alloc_dreg (cfg, (MonoStackType)call->inst.type);
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoInst *in = call->args [i];
			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
				/* implicit 'this' is treated as a native int */
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);
			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
				/* The result will be in an int vreg */
				call->args [i] = conv;
	call->need_unbox_trampoline = unbox_trampoline;
	/* lower the call's argument passing for the selected backend */
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
		mono_arch_emit_call (cfg, call);
		mono_arch_emit_call (cfg, call);
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach RGCTX_ARG (already copied into RGCTX_REG) to CALL as the
 * out-argument in MONO_ARCH_RGCTX_REG and flag the cfg/call accordingly.
 */
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;
	call->rgctx_arg_reg = rgctx_reg;
inline static MonoInst*
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG. Optionally
 * passes IMT_ARG and RGCTX_ARG. For pinvoke wrappers it can also emit a
 * stack-pointer check around the call that throws
 * ExecutionEngineException on calling-convention mismatch.
 */
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
	gboolean check_sp = FALSE;
	if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
		if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
		/* keep the rgctx arg alive in its own register across arg setup */
		rgctx_reg = mono_alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
		if (!cfg->stack_inbalance_var)
			cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		/* record the stack pointer before the call */
		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		ins->dreg = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
	call->inst.sreg1 = addr->dreg;
		emit_imt_argument (cfg, call, NULL, imt_arg);
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
		/* read the stack pointer after the call and compare */
		sp_reg = mono_alloc_preg (cfg);
		MONO_INST_NEW (cfg, ins, OP_GET_SP);
		MONO_ADD_INS (cfg->cbb, ins);
		/* Restore the stack so we don't crash when throwing the exception */
		MONO_INST_NEW (cfg, ins, OP_SET_SP);
		ins->sreg1 = cfg->stack_inbalance_var->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
	return (MonoInst*)call;
2769 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2772 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2774 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2777 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2778 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2780 #ifndef DISABLE_REMOTING
2781 gboolean might_be_remote = FALSE;
2783 gboolean virtual_ = this_ins != NULL;
2784 gboolean enable_for_aot = TRUE;
2787 MonoInst *call_target = NULL;
2789 gboolean need_unbox_trampoline;
2792 sig = mono_method_signature (method);
2794 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE))
2795 g_assert_not_reached ();
2798 rgctx_reg = mono_alloc_preg (cfg);
2799 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2802 if (method->string_ctor) {
2803 /* Create the real signature */
2804 /* FIXME: Cache these */
2805 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2806 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2811 context_used = mini_method_check_context_used (cfg, method);
2813 #ifndef DISABLE_REMOTING
2814 might_be_remote = this_ins && sig->hasthis &&
2815 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2816 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2818 if (might_be_remote && context_used) {
2821 g_assert (cfg->gshared);
2823 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2825 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2829 if (cfg->llvm_only && !call_target && virtual_ && (method->flags & METHOD_ATTRIBUTE_VIRTUAL))
2830 return emit_llvmonly_virtual_call (cfg, method, sig, 0, args);
2832 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2834 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual_, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2836 #ifndef DISABLE_REMOTING
2837 if (might_be_remote)
2838 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2841 call->method = method;
2842 call->inst.flags |= MONO_INST_HAS_METHOD;
2843 call->inst.inst_left = this_ins;
2844 call->tail_call = tail;
2847 int vtable_reg, slot_reg, this_reg;
2850 this_reg = this_ins->dreg;
2852 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2853 MonoInst *dummy_use;
2855 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2857 /* Make a call to delegate->invoke_impl */
2858 call->inst.inst_basereg = this_reg;
2859 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2860 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2862 /* We must emit a dummy use here because the delegate trampoline will
2863 replace the 'this' argument with the delegate target making this activation
2864 no longer a root for the delegate.
2865 This is an issue for delegates that target collectible code such as dynamic
2866 methods of GC'able assemblies.
2868 For a test case look into #667921.
2870 FIXME: a dummy use is not the best way to do it as the local register allocator
2871 will put it on a caller save register and spil it around the call.
2872 Ideally, we would either put it on a callee save register or only do the store part.
2874 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2876 return (MonoInst*)call;
2879 if ((!cfg->compile_aot || enable_for_aot) &&
2880 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2881 (MONO_METHOD_IS_FINAL (method) &&
2882 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2883 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2885 * the method is not virtual, we just need to ensure this is not null
2886 * and then we can call the method directly.
2888 #ifndef DISABLE_REMOTING
2889 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2891 * The check above ensures method is not gshared, this is needed since
2892 * gshared methods can't have wrappers.
2894 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2898 if (!method->string_ctor)
2899 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2901 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2902 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2904 * the method is virtual, but we can statically dispatch since either
2905 * it's class or the method itself are sealed.
2906 * But first we need to ensure it's not a null reference.
2908 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2910 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2911 } else if (call_target) {
2912 vtable_reg = alloc_preg (cfg);
2913 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2915 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2916 call->inst.sreg1 = call_target->dreg;
2917 call->inst.flags &= !MONO_INST_HAS_METHOD;
2919 vtable_reg = alloc_preg (cfg);
2920 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2921 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2922 guint32 imt_slot = mono_method_get_imt_slot (method);
2923 emit_imt_argument (cfg, call, call->method, imt_arg);
2924 slot_reg = vtable_reg;
2925 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2927 slot_reg = vtable_reg;
2928 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2929 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2931 g_assert (mono_method_signature (method)->generic_param_count);
2932 emit_imt_argument (cfg, call, call->method, imt_arg);
2936 call->inst.sreg1 = slot_reg;
2937 call->inst.inst_offset = offset;
2938 call->is_virtual = TRUE;
2942 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2945 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2947 return (MonoInst*)call;
/* Convenience wrapper: non-tail call to METHOD using its own signature. */
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct (non-virtual, non-tail) call to the native function FUNC
 * with signature SIG and return the call instruction.
 */
mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
	return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall whose C entry point is FUNC, going
 * through its registered wrapper.
 */
mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_abs_call:
 *
 *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
 */
inline static MonoInst*
mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
					MonoMethodSignature *sig, MonoInst **args)
	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
	/*
	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	 */
	/* register ji so the ABS patch resolver can find it later */
	if (cfg->abs_patches == NULL)
		cfg->abs_patches = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (cfg->abs_patches, ji, ji);
	ins = mono_emit_native_call (cfg, ji, sig, args);
	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
static MonoMethodSignature*
/*
 * sig_to_rgctx_sig:
 *
 *   Return a copy of SIG with one extra trailing native-int parameter,
 * used to pass an rgctx/extra argument on indirect calls.
 */
sig_to_rgctx_sig (MonoMethodSignature *sig)
	// FIXME: memory allocation
	MonoMethodSignature *res;
	/* room for the header plus param_count + 1 parameter pointers */
	res = (MonoMethodSignature *)g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
	memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
	res->param_count = sig->param_count + 1;
	for (i = 0; i < sig->param_count; ++i)
		res->params [i] = sig->params [i];
	/* the extra argument is passed as a native int (this_arg type) */
	res->params [sig->param_count] = &mono_defaults.int_class->this_arg;
/* Make an indirect call to FSIG passing an additional argument */
/*
 * emit_extra_arg_calli:
 *
 *   Build a new argument array from ORIG_ARGS plus the value in ARG_REG,
 * extend FSIG with one extra parameter, and emit an indirect call to
 * CALL_TARGET with the widened signature.
 */
emit_extra_arg_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **orig_args, int arg_reg, MonoInst *call_target)
	MonoMethodSignature *csig;
	MonoInst *args_buf [16];
	int i, pindex, tmp_reg;
	/* Make a call with an rgctx/extra arg */
	/* small arg lists use the stack buffer, larger ones the mempool */
	if (fsig->param_count + 2 < 16)
		args = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
	args [pindex ++] = orig_args [0];
	for (i = 0; i < fsig->param_count; ++i)
		args [pindex ++] = orig_args [fsig->hasthis + i];
	/* append the extra argument, copied from ARG_REG */
	tmp_reg = alloc_preg (cfg);
	EMIT_NEW_UNALU (cfg, args [pindex], OP_MOVE, tmp_reg, arg_reg);
	csig = sig_to_rgctx_sig (fsig);
	return mono_emit_calli (cfg, csig, args, call_target, NULL, NULL);
/* Emit an indirect call to the function descriptor ADDR */
emit_llvmonly_calli (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, MonoInst *addr)
	int addr_reg, arg_reg;
	MonoInst *call_target;
	g_assert (cfg->llvm_only);
	/*
	 * addr points to a <addr, arg> pair, load both of them, and
	 * make a call to addr, passing arg as an extra arg.
	 */
	addr_reg = alloc_preg (cfg);
	EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, addr->dreg, 0);
	arg_reg = alloc_preg (cfg);
	/* the arg is the second pointer-sized word of the descriptor */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, addr->dreg, sizeof (gpointer));
	return emit_extra_arg_calli (cfg, fsig, args, arg_reg, call_target);
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly (without the managed wrapper).
 * Disabled under mixed-mode LLVM, for sdb sequence points, and when
 * explicitly turned off in the cfg.
 */
direct_icalls_enabled (MonoCompile *cfg)
	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
	if (cfg->compile_llvm && !cfg->llvm_only)
	if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
mono_emit_jit_icall_by_info (MonoCompile *cfg, int il_offset, MonoJitICallInfo *info, MonoInst **args)
	/*
	 * Call the jit icall without a wrapper if possible.
	 * The wrapper is needed for the following reasons:
	 * - to handle exceptions thrown using mono_raise_exceptions () from the
	 *   icall function. The EH code needs the lmf frame pushed by the
	 *   wrapper to be able to unwind back to managed code.
	 * - to be able to do stack walks for asynchronously suspended
	 *   threads when debugging.
	 */
	if (info->no_raise && direct_icalls_enabled (cfg)) {
		if (!info->wrapper_method) {
			/* lazily create and cache the icall wrapper */
			name = g_strdup_printf ("__icall_wrapper_%s", info->name);
			info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
			/* publish wrapper_method to other threads before use */
			mono_memory_barrier ();
		/*
		 * Inline the wrapper method, which is basically a call to the C icall, and
		 * an exception check.
		 */
		costs = inline_method (cfg, info->wrapper_method, NULL,
							   args, NULL, il_offset, TRUE);
		g_assert (costs > 0);
		g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend a small-integer call result INS according to
 * FSIG->ret, because native (pinvoke/LLVM) callees may leave the upper
 * bits of sub-register-sized returns uninitialized.
 */
mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
		if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
			/*
			 * Native code might return non register sized integers
			 * without initializing the upper bits.
			 */
			switch (mono_type_to_load_membase (cfg, fsig->ret)) {
			case OP_LOADI1_MEMBASE:
				widen_op = OP_ICONV_TO_I1;
			case OP_LOADU1_MEMBASE:
				widen_op = OP_ICONV_TO_U1;
			case OP_LOADI2_MEMBASE:
				widen_op = OP_ICONV_TO_I2;
			case OP_LOADU2_MEMBASE:
				widen_op = OP_ICONV_TO_U2;
			if (widen_op != -1) {
				int dreg = alloc_preg (cfg);
				EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
				widen->type = ins->type;
/*
 * emit_method_access_failure:
 *
 *   Emit a call to the mono_throw_method_access icall, passing the
 * caller METHOD and the inaccessible CIL_METHOD as rgctx-resolved
 * method handles.
 */
emit_method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
	MonoInst *args [16];
	args [0] = emit_get_rgctx_method (cfg, mono_method_check_context_used (method), method, MONO_RGCTX_INFO_METHOD);
	args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cil_method), cil_method, MONO_RGCTX_INFO_METHOD);
	mono_emit_jit_icall (cfg, mono_throw_method_access, args);
/*
 * get_memcpy_method:
 *
 *   Return (and lazily cache) corlib's String.memcpy(dst, src, n) helper.
 * Aborts if it is missing (old corlib).
 */
get_memcpy_method (void)
	static MonoMethod *memcpy_method = NULL;
	if (!memcpy_method) {
		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
			g_error ("Old corlib found. Install a new one");
	return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively set a bit in WB_BITMAP for every pointer-sized slot of
 * KLASS (at byte OFFSET from the start) that holds a managed reference,
 * so the caller can emit write barriers only where needed.
 */
create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
	MonoClassField *field;
	gpointer iter = NULL;
	while ((field = mono_class_get_fields (klass, &iter))) {
		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
		/* valuetype field offsets include the (absent) object header; strip it */
		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
		if (mini_type_is_reference (mono_field_get_type (field))) {
			g_assert ((foffset % SIZEOF_VOID_P) == 0);
			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
			/* embedded struct with references: recurse into it */
			MonoClass *field_class = mono_class_from_mono_type (field->type);
			if (field_class->has_references)
				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Chooses, in
 * order of preference: the backend's card-table wbarrier opcode, inline
 * card-table marking, or a call to the GC's write barrier method.
 * No-op when write barriers are disabled.
 */
emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
	int card_table_shift_bits;
	gpointer card_table_mask;
	MonoInst *dummy_use;
	int nursery_shift_bits;
	size_t nursery_size;
	if (!cfg->gen_write_barriers)
	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
	if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
		/* single backend opcode does the whole barrier */
		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
		wbarrier->sreg1 = ptr->dreg;
		wbarrier->sreg2 = value->dreg;
		MONO_ADD_INS (cfg->cbb, wbarrier);
	} else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
		int offset_reg = alloc_preg (cfg);
		/* compute the card index: (ptr >> shift) & mask */
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
		if (card_table_mask)
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
		 * IMM's larger than 32bits.
		 */
		ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
		card_reg = ins->dreg;
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
		/* mark the card dirty */
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
		/* fallback: call the GC's managed write barrier */
		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	/* keep VALUE alive across the barrier */
	EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a value of type
 * KLASS (SIZE bytes, ALIGN alignment) from iargs[1] to iargs[0].
 * Falls back to the mono_gc_wbarrier_value_copy_bitmap icall for large
 * copies, and unrolls pointer-sized load/store pairs (with barriers on
 * reference slots) for small ones. Returns failure when alignment or
 * size make the inline path impossible (early-return lines elided in
 * this view).
 */
mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
	unsigned need_wb = 0;
	/*types with references can't have alignment smaller than sizeof(void*) */
	if (align < SIZEOF_VOID_P)
	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
	if (size > 32 * SIZEOF_VOID_P)
	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
	/* We don't unroll more than 5 stores to avoid code bloat. */
	if (size > 5 * SIZEOF_VOID_P) {
		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
		size += (SIZEOF_VOID_P - 1);
		size &= ~(SIZEOF_VOID_P - 1);
		EMIT_NEW_ICONST (cfg, iargs [2], size);
		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
	destreg = iargs [0]->dreg;
	srcreg = iargs [1]->dreg;
	dest_ptr_reg = alloc_preg (cfg);
	tmp_reg = alloc_preg (cfg);
	/* walk the destination with a separate pointer register */
	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
	while (size >= SIZEOF_VOID_P) {
		MonoInst *load_inst;
		MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
		load_inst->dreg = tmp_reg;
		load_inst->inst_basereg = srcreg;
		load_inst->inst_offset = offset;
		MONO_ADD_INS (cfg->cbb, load_inst);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
			/* reference slot: emit the barrier for this store */
			emit_write_barrier (cfg, iargs [0], load_inst);
		offset += SIZEOF_VOID_P;
		size -= SIZEOF_VOID_P;
		/*tmp += sizeof (void*)*/
		if (size >= SIZEOF_VOID_P) {
			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
			MONO_ADD_INS (cfg->cbb, iargs [0]);
	/* Those cannot be references since size < sizeof (void*) */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 * When @native, uses the native (marshalled) size and skips write barriers;
 * gsharedvt classes resolve size/memcpy helpers through the rgctx.
 */
mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
	MonoInst *iargs [4];
	MonoMethod *memcpy_method;
	MonoInst *size_ins = NULL;
	MonoInst *memcpy_ins = NULL;
	klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
	/*
	 * This check breaks with spilled vars... need to handle it during verification anyway.
	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	 */
	if (mini_is_gsharedvt_klass (klass)) {
		/* size and memcpy helper are only known at runtime via the rgctx */
		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
		n = mono_class_native_size (klass, &align);
		n = mono_class_value_size (klass, &align);
	/* if native is true there should be no references in the struct */
	if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
		/* Avoid barriers when storing to the stack */
		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
			  (dest->opcode == OP_LDADDR))) {
			context_used = mini_class_check_context_used (cfg, klass);
			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
			if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
			} else if (context_used) {
				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
				iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
				if (!cfg->compile_aot)
					mono_class_compute_gc_descriptor (klass);
				mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
				mono_emit_jit_icall (cfg, mono_value_copy, iargs);
	/* no barriers needed: plain memcpy, inlined for small fixed sizes */
	if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
		iargs [2] = size_ins;
		EMIT_NEW_ICONST (cfg, iargs [2], n);
	memcpy_method = get_memcpy_method ();
		mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) corlib's String.memset(ptr, val, n) helper.
 * Aborts if it is missing (old corlib).
 */
get_memset_method (void)
	static MonoMethod *memset_method = NULL;
	if (!memset_method) {
		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
			g_error ("Old corlib found. Install a new one");
	return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the
 * address in DEST. gsharedvt classes go through a runtime bzero helper;
 * small fixed sizes are inlined, larger ones call corlib's memset.
 */
mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
	MonoInst *iargs [3];
	MonoMethod *memset_method;
	MonoInst *size_ins = NULL;
	MonoInst *bzero_ins = NULL;
	static MonoMethod *bzero_method;
	/* FIXME: Optimize this for the case when dest is an LDADDR */
	mono_class_init (klass);
	if (mini_is_gsharedvt_klass (klass)) {
		/* size and bzero helper are only known at runtime via the rgctx */
		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
		bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
			bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
		g_assert (bzero_method);
		iargs [1] = size_ins;
		mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
	klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
	n = mono_class_value_size (klass, &align);
	/* small blocks are zeroed inline */
	if (n <= sizeof (gpointer) * 8) {
		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
		memset_method = get_memset_method ();
		EMIT_NEW_ICONST (cfg, iargs [1], 0);
		EMIT_NEW_ICONST (cfg, iargs [2], n);
		mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * Emit IR to return either the this pointer for instance method,
 * or the mrgctx for static methods.
 * For generic methods the mrgctx is loaded from the vtable var; for
 * static/valuetype methods the vtable is loaded; otherwise the vtable
 * is read from 'this'.
 */
emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
	MonoInst *this_ins = NULL;
	g_assert (cfg->gshared);
	/* instance methods on reference types can use 'this' directly */
	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
			!method->klass->valuetype)
		EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		MonoInst *mrgctx_loc, *mrgctx_var;
		g_assert (!this_ins);
		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
		mrgctx_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
		MonoInst *vtable_loc, *vtable_var;
		g_assert (!this_ins);
		vtable_loc = mono_get_vtable_var (cfg);
		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* the var actually holds an mrgctx; load the vtable out of it */
			MonoInst *mrgctx_var = vtable_var;
			vtable_reg = alloc_preg (cfg);
			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
			vtable_var->type = STACK_PTR;
		/* default: read the vtable from the 'this' object */
		vtable_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
static MonoJumpInfoRgctxEntry *
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate an rgctx-entry patch descriptor from MP: METHOD plus an
 * embedded MonoJumpInfo of PATCH_TYPE/PATCH_DATA, resolved to INFO_TYPE.
 * IN_MRGCTX selects the method rgctx over the class vtable rgctx.
 */
mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
	MonoJumpInfoRgctxEntry *res = (MonoJumpInfoRgctxEntry *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
	res->method = method;
	res->in_mrgctx = in_mrgctx;
	res->data = (MonoJumpInfo *)mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
	res->data->type = patch_type;
	res->data->data.target = patch_data;
	res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 *
 *   Emit IR which fetches the value of the rgctx slot described by ENTRY,
 * starting from the rgctx/mrgctx pointer in RGCTX, with the lookup inlined
 * into the method instead of going through a fetch trampoline.
 * Emits a fast path which walks the rgctx arrays directly, and a slow path
 * (is_null_bb) which calls the mono_fill_{method,class}_rgctx icalls.
 * NOTE(review): several lines of this extract are elided (braces, else
 * branches, AOT/non-AOT split), so the exact branch structure is not fully
 * visible here.
 */
3554 static inline MonoInst*
3555 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3557 MonoInst *args [16];
3560 // FIXME: No fastpath since the slot is not a compile time constant
3562 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3563 if (entry->in_mrgctx)
3564 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3566 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3570 * FIXME: This can be called during decompose, which is a problem since it creates
3572 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3574 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3576 MonoBasicBlock *is_null_bb, *end_bb;
3577 MonoInst *res, *ins, *call;
/* Resolve the (depth, index) coordinates of the slot in the rgctx array chain */
3580 slot = mini_get_rgctx_entry_slot (entry);
3582 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3583 index = MONO_RGCTX_SLOT_INDEX (slot);
3585 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3586 for (depth = 0; ; ++depth) {
3587 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
/* The last array entry is a pointer to the next array, so usable slots are size - 1 */
3589 if (index < size - 1)
3594 NEW_BBLOCK (cfg, end_bb);
3595 NEW_BBLOCK (cfg, is_null_bb);
3598 rgctx_reg = rgctx->dreg;
3600 rgctx_reg = alloc_preg (cfg);
3602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3603 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3604 NEW_BBLOCK (cfg, is_null_bb);
/* If the rgctx itself hasn't been allocated yet, fall back to the slow path */
3606 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3607 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Walk DEPTH levels of the chained rgctx arrays */
3610 for (i = 0; i < depth; ++i) {
3611 int array_reg = alloc_preg (cfg);
3613 /* load ptr to next array */
3614 if (mrgctx && i == 0)
3615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3618 rgctx_reg = array_reg;
3619 /* is the ptr null? */
3620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3621 /* if yes, jump to actual trampoline */
3622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Load the slot value itself (offset by one pointer, matching the walk above) */
3626 val_reg = alloc_preg (cfg);
3627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3628 /* is the slot null? */
3629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3630 /* if yes, jump to actual trampoline */
3631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Fast path result: move the fetched value into the common result reg */
3634 res_reg = alloc_preg (cfg);
3635 MONO_INST_NEW (cfg, ins, OP_MOVE);
3636 ins->dreg = res_reg;
3637 ins->sreg1 = val_reg;
3638 MONO_ADD_INS (cfg->cbb, ins);
3640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: the slot is not yet initialized, fill it via an icall */
3643 MONO_START_BB (cfg, is_null_bb);
3645 EMIT_NEW_ICONST (cfg, args [1], index);
3647 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3649 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3650 MONO_INST_NEW (cfg, ins, OP_MOVE);
3651 ins->dreg = res_reg;
3652 ins->sreg1 = call->dreg;
3653 MONO_ADD_INS (cfg->cbb, ins);
3654 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3656 MONO_START_BB (cfg, end_bb);
3665 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
3668 static inline MonoInst*
3669 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
/* Inline lookup path; presumably gated on a cfg flag elided from this extract — TODO confirm */
3672 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
/* Otherwise go through the lazy fetch trampoline via an abs call */
3674 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the rgctx data of type RGCTX_TYPE for KLASS
 * (a MONO_PATCH_INFO_CLASS entry) from the current method's rgctx.
 */
3678 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3679 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3681 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3682 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3684 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the rgctx data of type RGCTX_TYPE for the signature SIG
 * (a MONO_PATCH_INFO_SIGNATURE entry) from the current method's rgctx.
 */
3688 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3689 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3691 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3692 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3694 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load the rgctx data of type RGCTX_TYPE for a gsharedvt call
 * described by (SIG, CMETHOD). The pair is packed into a mempool-allocated
 * MonoJumpInfoGSharedVtCall used as the patch data.
 */
3698 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3699 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3701 MonoJumpInfoGSharedVtCall *call_info;
3702 MonoJumpInfoRgctxEntry *entry;
3705 call_info = (MonoJumpInfoGSharedVtCall *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3706 call_info->sig = sig;
3707 call_info->method = cmethod;
3709 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3710 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3712 return emit_rgctx_fetch (cfg, rgctx, entry);
3716 * emit_get_rgctx_virt_method:
3718 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3721 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3722 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3724 MonoJumpInfoVirtMethod *info;
3725 MonoJumpInfoRgctxEntry *entry;
/* Pack the (klass, virt_method) pair into the patch data for the rgctx entry */
3728 info = (MonoJumpInfoVirtMethod *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3729 info->klass = klass;
3730 info->method = virt_method;
3732 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3733 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3735 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO data for
 * CMETHOD, using INFO as the MONO_PATCH_INFO_GSHAREDVT_METHOD patch data.
 */
3739 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3740 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3742 MonoJumpInfoRgctxEntry *entry;
3745 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3746 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3748 return emit_rgctx_fetch (cfg, rgctx, entry);
3752 * emit_get_rgctx_method:
3754 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3755 * normal constants, else emit a load from the rgctx.
3758 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3759 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic sharing in effect: the method is known at JIT time, emit a constant */
3761 if (!context_used) {
3764 switch (rgctx_type) {
3765 case MONO_RGCTX_INFO_METHOD:
3766 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3768 case MONO_RGCTX_INFO_METHOD_RGCTX:
3769 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Only the two rgctx types above are supported for the constant path */
3772 g_assert_not_reached ();
/* Shared code: load the data from the rgctx at runtime */
3775 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3776 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3778 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the rgctx data of type RGCTX_TYPE for FIELD
 * (a MONO_PATCH_INFO_FIELD entry) from the current method's rgctx.
 */
3783 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3784 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3786 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3787 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3789 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the current method's
 * gsharedvt info template table, reusing an existing matching entry when
 * possible and appending a new one otherwise. The entry table grows by
 * doubling (starting at 16) in the cfg mempool.
 */
3793 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3795 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3796 MonoRuntimeGenericContextInfoTemplate *template_;
/* Look for an existing entry; LOCAL_OFFSET entries are never shared */
3801 for (i = 0; i < info->num_entries; ++i) {
3802 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3804 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry array when full */
3808 if (info->num_entries == info->count_entries) {
3809 MonoRuntimeGenericContextInfoTemplate *new_entries;
3810 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3812 new_entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
/* Mempool memory is never freed, so the old array is simply abandoned */
3814 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3815 info->entries = new_entries;
3816 info->count_entries = new_count_entries;
/* Append the new entry */
3819 idx = info->num_entries;
3820 template_ = &info->entries [idx];
3821 template_->info_type = rgctx_type;
3822 template_->data = data;
3824 info->num_entries ++;
3830 * emit_get_gsharedvt_info:
3832 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3835 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or find) a slot in the gsharedvt info table for (data, rgctx_type) */
3840 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3841 /* Load info->entries [idx] */
3842 dreg = alloc_preg (cfg);
3843 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info for KLASS using its byval type as the key */
3849 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3851 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3855 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the static constructor / class initialization of KLASS
 * if it hasn't run yet, using its vtable as the argument. In shared code the
 * vtable is fetched from the rgctx; otherwise it is a compile-time constant.
 */
3858 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3860 MonoInst *vtable_arg;
3863 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable from the rgctx */
3866 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3867 klass, MONO_RGCTX_INFO_VTABLE);
3869 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3873 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3876 if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3880 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3881 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3883 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3884 ins->sreg1 = vtable_arg->dreg;
3885 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: open-coded "already initialized?" check on the vtable bitfield */
3887 static int byte_offset = -1;
3888 static guint8 bitmask;
3889 int bits_reg, inited_reg;
3890 MonoBasicBlock *inited_bb;
3891 MonoInst *args [16];
/* Locate MonoVTable.initialized once; cached in function-local statics */
3893 if (byte_offset < 0)
3894 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3896 bits_reg = alloc_ireg (cfg);
3897 inited_reg = alloc_ireg (cfg);
3899 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3902 NEW_BBLOCK (cfg, inited_bb);
/* Skip the icall when the initialized bit is already set */
3904 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3905 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3907 args [0] = vtable_arg;
3908 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3910 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for debugger support, but only when
 * sequence point generation is enabled and METHOD is the method being
 * compiled (not an inlined callee).
 */
3915 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3919 if (cfg->gen_seq_points && cfg->method == method) {
3920 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3922 ins->flags |= MONO_INST_NONEMPTY_STACK;
3923 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR which records the source and
 * target classes of a cast into the JIT TLS area so a failing cast can
 * produce a detailed error message. If NULL_CHECK is set, the recording is
 * skipped for a null OBJ_REG. No-op otherwise.
 */
3928 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3930 if (mini_get_debug_options ()->better_cast_details) {
3931 int vtable_reg = alloc_preg (cfg);
3932 int klass_reg = alloc_preg (cfg);
3933 MonoBasicBlock *is_null_bb = NULL;
3935 int to_klass_reg, context_used;
3938 NEW_BBLOCK (cfg, is_null_bb);
/* Skip the bookkeeping entirely when the object is null */
3940 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3941 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3944 tls_get = mono_get_jit_tls_intrinsic (cfg);
3946 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3950 MONO_ADD_INS (cfg->cbb, tls_get);
/* Record the dynamic (from) class of the object being cast */
3951 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3952 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3954 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record the target (to) class; fetched from the rgctx in shared code */
3956 context_used = mini_class_check_context_used (cfg, klass);
3958 MonoInst *class_ins;
3960 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3961 to_klass_reg = class_ins->dreg;
3963 to_klass_reg = alloc_preg (cfg);
3964 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3966 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3969 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details recorded by save_cast_details () once the cast
 * has succeeded, so stale data is not reported for a later failing cast.
 * No-op unless --debug=casts is enabled.
 */
3974 reset_cast_details (MonoCompile *cfg)
3976 /* Reset the variables holding the cast details */
3977 if (mini_get_debug_options ()->better_cast_details) {
3978 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3980 MONO_ADD_INS (cfg->cbb, tls_get);
3981 /* It is enough to reset the from field */
3982 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3987 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which verifies that OBJ's dynamic type is exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for covariant array
 * stores). The comparison strategy depends on the compilation mode:
 * shared code compares classes, generic-shared and AOT code compare vtables
 * via runtime constants, and plain JIT compares against the vtable pointer.
 */
3990 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3992 int vtable_reg = alloc_preg (cfg);
3995 context_used = mini_class_check_context_used (cfg, array_class);
3997 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Loading the vtable also acts as the null check (faulting load) */
3999 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4001 if (cfg->opt & MONO_OPT_SHARED) {
4002 int class_reg = alloc_preg (cfg);
4005 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4006 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
4007 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
4008 } else if (context_used) {
4009 MonoInst *vtable_ins;
4011 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
4012 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
4014 if (cfg->compile_aot) {
4018 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4020 vt_reg = alloc_preg (cfg);
4021 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
4022 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
/* Non-AOT: the vtable address can be embedded as an immediate */
4025 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
4027 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
4031 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
4033 reset_cast_details (cfg);
4037 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
4038 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () on VAL. In shared generic code the
 * method address is loaded from the rgctx; otherwise a direct (or llvm-only
 * indirect) call is emitted, optionally passing the vtable as the rgctx arg.
 */
4041 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
4043 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
4046 MonoInst *rgctx, *addr;
4048 /* FIXME: What if the class is shared? We might not
4049 have to get the address of the method from the
4051 addr = emit_get_rgctx_method (cfg, context_used, method,
4052 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4053 if (cfg->llvm_only) {
/* llvm-only: record the signature so a gsharedvt in wrapper can be generated */
4054 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
4055 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4057 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4059 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4062 gboolean pass_vtable, pass_mrgctx;
4063 MonoInst *rgctx_arg = NULL;
4065 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4066 g_assert (!pass_mrgctx);
4069 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4072 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4075 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR which unboxes the object on top of the stack (SP [0]) to a value
 * of type KLASS: a type check (rank must be 0 and the element class must
 * match) followed by computing the address of the boxed payload
 * (obj + sizeof (MonoObject)). Returns the address instruction (STACK_MP).
 */
4080 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4084 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4085 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4086 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4087 int rank_reg = alloc_dreg (cfg ,STACK_I4);
4089 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
4090 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4091 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4093 /* FIXME: generics */
4094 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a value type */
4097 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4098 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4100 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4101 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class loaded from the rgctx */
4104 MonoInst *element_class;
4106 /* This assertion is from the unboxcast insn */
4107 g_assert (klass->rank == 0);
4109 element_class = emit_get_rgctx_klass (cfg, context_used,
4110 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4112 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4113 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4115 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4116 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4117 reset_cast_details (cfg);
/* The payload starts right after the MonoObject header */
4120 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4121 MONO_ADD_INS (cfg->cbb, add);
4122 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR which unboxes OBJ when KLASS is a gsharedvt type whose concrete
 * representation (vtype, reference, or nullable) is only known at runtime.
 * A three-way branch on the MONO_RGCTX_INFO_CLASS_BOX_TYPE info selects:
 *   - vtype: address of the boxed payload (obj + sizeof (MonoObject));
 *   - reference: the ref is spilled to a temporary whose address is used;
 *   - nullable: an indirect call to Nullable<T>.Unbox via the rgctx.
 * The result is loaded from the common address register at end_bb.
 */
4129 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4131 MonoInst *addr, *klass_inst, *is_ref, *args[16];
4132 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4136 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic type check before unboxing */
4142 args [1] = klass_inst;
4145 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4147 NEW_BBLOCK (cfg, is_ref_bb);
4148 NEW_BBLOCK (cfg, is_nullable_bb);
4149 NEW_BBLOCK (cfg, end_bb);
4150 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4151 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4152 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4154 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4155 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4157 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4158 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: payload follows the object header */
4162 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4163 MONO_ADD_INS (cfg->cbb, addr);
4165 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4168 MONO_START_BB (cfg, is_ref_bb);
4170 /* Save the ref to a temporary */
4171 dreg = alloc_ireg (cfg);
4172 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4173 addr->dreg = addr_reg;
4174 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4175 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4178 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through an address from the info table */
4181 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4182 MonoInst *unbox_call;
4183 MonoMethodSignature *unbox_sig;
/* The signature cannot be constructed via the usual APIs at JIT time, so build it by hand */
4185 unbox_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4186 unbox_sig->ret = &klass->byval_arg;
4187 unbox_sig->param_count = 1;
4188 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4191 unbox_call = emit_llvmonly_calli (cfg, unbox_sig, &obj, addr);
4193 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4195 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4196 addr->dreg = addr_reg;
4199 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4202 MONO_START_BB (cfg, end_bb);
/* Load the unboxed value from whichever address the taken branch produced */
4205 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4211 * Returns NULL and set the cfg exception on error.
4214 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4216 MonoInst *iargs [2];
4221 MonoRgctxInfoType rgctx_info;
4222 MonoInst *iargs [2];
4223 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4225 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
4227 if (cfg->opt & MONO_OPT_SHARED)
4228 rgctx_info = MONO_RGCTX_INFO_KLASS;
4230 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4231 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4233 if (cfg->opt & MONO_OPT_SHARED) {
4234 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4236 alloc_ftn = ves_icall_object_new;
4239 alloc_ftn = ves_icall_object_new_specific;
4242 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4243 if (known_instance_size) {
4244 int size = mono_class_instance_size (klass);
4245 if (size < sizeof (MonoObject))
4246 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4248 EMIT_NEW_ICONST (cfg, iargs [1], size);
4250 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4253 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4256 if (cfg->opt & MONO_OPT_SHARED) {
4257 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4258 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4260 alloc_ftn = ves_icall_object_new;
4261 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4262 /* This happens often in argument checking code, eg. throw new FooException... */
4263 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4264 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4265 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4267 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4268 MonoMethod *managed_alloc = NULL;
4272 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4273 cfg->exception_ptr = klass;
4277 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4279 if (managed_alloc) {
4280 int size = mono_class_instance_size (klass);
4281 if (size < sizeof (MonoObject))
4282 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4284 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4285 EMIT_NEW_ICONST (cfg, iargs [1], size);
4286 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4288 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4290 guint32 lw = vtable->klass->instance_size;
4291 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4292 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4293 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4296 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4300 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4304 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL of type KLASS and returns the resulting object
 * reference. Nullable<T> goes through Nullable<T>.Box; gsharedvt types
 * branch at runtime on the box type (vtype / ref / nullable); plain value
 * types allocate an object and store the value after the header.
 * NOTE(review): several branch/else lines are elided from this extract.
 */
4307 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4309 MonoInst *alloc, *ins;
4311 if (mono_class_is_nullable (klass)) {
4312 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4315 if (cfg->llvm_only && cfg->gsharedvt) {
4316 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4317 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4318 return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
4320 /* FIXME: What if the class is shared? We might not
4321 have to get the method address from the RGCTX. */
4322 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4323 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4324 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4326 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4329 gboolean pass_vtable, pass_mrgctx;
4330 MonoInst *rgctx_arg = NULL;
4332 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4333 g_assert (!pass_mrgctx);
4336 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4339 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4342 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Gsharedvt: the concrete box behavior is only known at runtime */
4346 if (mini_is_gsharedvt_klass (klass)) {
4347 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4348 MonoInst *res, *is_ref, *src_var, *addr;
4351 dreg = alloc_ireg (cfg);
4353 NEW_BBLOCK (cfg, is_ref_bb);
4354 NEW_BBLOCK (cfg, is_nullable_bb);
4355 NEW_BBLOCK (cfg, end_bb);
4356 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4357 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4358 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4360 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and store the value right after the header */
4364 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4367 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4368 ins->opcode = OP_STOREV_MEMBASE;
4370 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4371 res->type = STACK_OBJ;
4373 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4376 MONO_START_BB (cfg, is_ref_bb);
4378 /* val is a vtype, so has to load the value manually */
4379 src_var = get_vreg_to_inst (cfg, val->dreg);
4381 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4382 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4383 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4387 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Box via an address from the gsharedvt info */
4390 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4391 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4393 MonoMethodSignature *box_sig;
4396 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4397 * construct that method at JIT time, so have to do things by hand.
4399 box_sig = (MonoMethodSignature *)mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4400 box_sig->ret = &mono_defaults.object_class->byval_arg;
4401 box_sig->param_count = 1;
4402 box_sig->params [0] = &klass->byval_arg;
4405 box_call = emit_llvmonly_calli (cfg, box_sig, &val, addr);
4407 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4408 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4409 res->type = STACK_OBJ;
4413 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4415 MONO_START_BB (cfg, end_bb);
/* Plain value type: allocate + store after the header */
4419 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4423 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one variant (co/contravariant) type parameter
 * instantiated with a reference type. Such casts need the full runtime
 * variance check rather than a simple class comparison.
 */
4429 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4432 MonoGenericContainer *container;
4433 MonoGenericInst *ginst;
4435 if (klass->generic_class) {
4436 container = klass->generic_class->container_class->generic_container;
4437 ginst = klass->generic_class->context.class_inst;
4438 } else if (klass->generic_container && context_used) {
4439 container = klass->generic_container;
4440 ginst = container->context.class_inst;
4445 for (i = 0; i < container->type_argc; ++i) {
/* Invariant parameters can be handled with an exact check, skip them */
4447 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4449 type = ginst->type_argv [i];
4450 if (mini_type_is_reference (type))
/* Lazily built whitelist of corlib type names whose icalls may be called directly */
4456 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether the icall CMETHOD may be called directly (without the
 * usual wrapper). Only a small whitelist of corlib types is allowed, since
 * direct calls bypass the machinery needed by mono_raise_exception ().
 */
4459 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4461 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4462 if (!direct_icalls_enabled (cfg))
4466 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4467 * Whitelist a few icalls for now.
4469 if (!direct_icall_type_hash) {
4470 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4472 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4473 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4474 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4475 g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
/* Publish the fully built table before the global pointer becomes visible */
4476 mono_memory_barrier ();
4477 direct_icall_type_hash = h;
4480 if (cmethod->klass == mono_defaults.math_class)
4482 /* No locking needed */
4483 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/*
 * method_needs_stack_walk:
 *
 *   Return whether CMETHOD inspects its caller's stack frame
 * (e.g. System.Type.GetType), which prevents optimizations such as inlining
 * or tail calls that would change what the stack walk observes.
 */
4489 method_needs_stack_walk (MonoCompile *cfg, MonoMethod *cmethod)
4491 if (cmethod->klass == mono_defaults.systemtype_class) {
4492 if (!strcmp (cmethod->name, "GetType"))
/* True when an isinst/castclass against KLASS cannot be a simple class-pointer
 * compare: interfaces, arrays, nullables, MBR proxies, sealed classes and
 * open type variables all need the full check. */
4498 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/* Emit a call to the isinst-with-cache wrapper; ARGS is (obj, klass, cache) */
4501 emit_isinst_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4503 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4504 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache wrapper; ARGS is
 * (obj, klass, cache). Cast details are recorded around the call so a
 * failing cast can be diagnosed under --debug=casts.
 */
4508 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4510 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
4513 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4514 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4515 reset_cast_details (cfg);
/* Return a per-call-site unique id for a CASTCLASS_CACHE patch, combining
 * the method index (high 16 bits) with a per-method counter (low 16 bits). */
4521 get_castclass_cache_idx (MonoCompile *cfg)
4523 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4524 cfg->castclass_cache_index ++;
4525 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_isinst_with_cache_nonshared:
 *
 *   Non-shared-code variant of the cached isinst: KLASS is a compile-time
 * constant and the cache slot is identified by a per-call-site index.
 */
4530 emit_isinst_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4535 args [0] = obj; /* obj */
4536 EMIT_NEW_CLASSCONST (cfg, args [1], klass); /* klass */
4538 idx = get_castclass_cache_idx (cfg); /* inline cache*/
4539 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4541 return emit_isinst_with_cache (cfg, klass, args);
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code variant of the cached castclass: KLASS is a compile-time
 * constant and the cache slot is identified by a per-call-site index.
 */
4545 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4554 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4557 idx = get_castclass_cache_idx (cfg);
4558 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4560 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4561 return emit_castclass_with_cache (cfg, klass, args);
4565 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for a castclass of SRC to KLASS, throwing InvalidCastException
 * on failure. A null source always succeeds. Complex targets (interfaces,
 * variant generics, arrays, sealed, nullables, type vars) go through the
 * castclass-with-cache wrapper; simple targets are open-coded as vtable /
 * class pointer comparisons.
 * NOTE(review): some branch/else lines are elided from this extract.
 */
4568 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4570 MonoBasicBlock *is_null_bb;
4571 int obj_reg = src->dreg;
4572 int vtable_reg = alloc_preg (cfg);
4573 MonoInst *klass_inst = NULL;
/* Casting a compile-time null always succeeds */
4575 if (src->opcode == OP_PCONST && src->inst_p0 == 0)
4581 if (mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4582 MonoInst *cache_ins;
4584 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4589 /* klass - it's the second element of the cache entry*/
4590 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4593 args [2] = cache_ins;
4595 return emit_castclass_with_cache (cfg, klass, args);
/* Shared code: the target class comes from the rgctx */
4598 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4601 NEW_BBLOCK (cfg, is_null_bb);
/* Runtime null check: null casts to anything */
4603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4606 save_cast_details (cfg, klass, obj_reg, FALSE);
4608 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4609 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4610 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4612 int klass_reg = alloc_preg (cfg);
4614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array targets allow an exact vtable/class compare */
4616 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4617 /* the remoting code is broken, access the class for now */
4618 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4619 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4621 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4622 cfg->exception_ptr = klass;
4625 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4630 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
4632 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4633 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4637 MONO_START_BB (cfg, is_null_bb);
4639 reset_cast_details (cfg);
4645 * Returns NULL and sets the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing CIL 'isinst': return SRC if it is an instance of
 * KLASS, otherwise NULL.  CONTEXT_USED is nonzero when KLASS comes from a
 * generic-sharing (rgctx) context.
 */
4648 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4651 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4652 int obj_reg = src->dreg;
4653 int vtable_reg = alloc_preg (cfg);
4654 int res_reg = alloc_ireg_ref (cfg);
4655 MonoInst *klass_inst = NULL;
/* Complex checks go through the cached isinst helper. */
4660 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4661 MonoInst *cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4663 args [0] = src; /* obj */
4665 /* klass - it's the second element of the cache entry*/
4666 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4668 args [2] = cache_ins; /* cache */
4669 return emit_isinst_with_cache (cfg, klass, args);
4672 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4675 NEW_BBLOCK (cfg, is_null_bb);
4676 NEW_BBLOCK (cfg, false_bb);
4677 NEW_BBLOCK (cfg, end_bb);
4679 /* Do the assignment at the beginning, so the other assignment can be if converted */
4680 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4681 ins->type = STACK_OBJ;
/* isinst on null yields null, which the default OP_MOVE above already provides. */
4684 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4689 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4690 g_assert (!context_used);
4691 /* the is_null_bb target simply copies the input register to the output */
4692 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4694 int klass_reg = alloc_preg (cfg);
4697 int rank_reg = alloc_preg (cfg);
4698 int eclass_reg = alloc_preg (cfg);
4700 g_assert (!context_used);
/* Array case: first the ranks must match, then the element classes are compared. */
4701 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4702 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4703 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4705 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types involving System.Object / System.Enum. */
4706 if (klass->cast_class == mono_defaults.object_class) {
4707 int parent_reg = alloc_preg (cfg);
4708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4709 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4710 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4711 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4712 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4713 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4714 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4715 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4716 } else if (klass->cast_class == mono_defaults.enum_class) {
4717 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4719 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4720 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4722 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4723 /* Check that the object is a vector too */
4724 int bounds_reg = alloc_preg (cfg);
4725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4730 /* the is_null_bb target simply copies the input register to the output */
4731 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4733 } else if (mono_class_is_nullable (klass)) {
4734 g_assert (!context_used);
4735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4736 /* the is_null_bb target simply copies the input register to the output */
4737 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class fast path: a single class-pointer compare decides the result. */
4739 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4740 g_assert (!context_used);
4741 /* the remoting code is broken, access the class for now */
4742 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mentions some remoting fixes were due.*/
4743 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4745 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4746 cfg->exception_ptr = klass;
4749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4752 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4754 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4755 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4758 /* the is_null_bb target simply copies the input register to the output */
4759 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false: overwrite the default copy with NULL; is_null: keep the copy. */
4764 MONO_START_BB (cfg, false_bb);
4766 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4767 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4769 MONO_START_BB (cfg, is_null_bb);
4771 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant (see the result encoding
 * in the comment below).  When remoting is disabled the proxy paths are
 * compiled out and hitting this code aborts.
 */
4777 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4779 /* This opcode takes as input an object reference and a class, and returns:
4780 0) if the object is an instance of the class,
4781 1) if the object is not instance of the class,
4782 2) if the object is a proxy whose type cannot be determined */
4785 #ifndef DISABLE_REMOTING
4786 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4788 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4790 int obj_reg = src->dreg;
4791 int dreg = alloc_ireg (cfg);
4793 #ifndef DISABLE_REMOTING
4794 int klass_reg = alloc_preg (cfg);
4797 NEW_BBLOCK (cfg, true_bb);
4798 NEW_BBLOCK (cfg, false_bb);
4799 NEW_BBLOCK (cfg, end_bb);
4800 #ifndef DISABLE_REMOTING
4801 NEW_BBLOCK (cfg, false2_bb);
4802 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null objects are "not an instance" (result 1). */
4805 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4806 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4808 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4809 #ifndef DISABLE_REMOTING
4810 NEW_BBLOCK (cfg, interface_fail_bb);
4813 tmp_reg = alloc_preg (cfg);
4814 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4815 #ifndef DISABLE_REMOTING
/* Interface check failed: see whether the object is a transparent proxy
 * without custom type info, i.e. result 2 (type cannot be determined). */
4816 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4817 MONO_START_BB (cfg, interface_fail_bb);
4818 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4820 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4822 tmp_reg = alloc_preg (cfg);
4823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4824 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4825 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4827 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4830 #ifndef DISABLE_REMOTING
4831 tmp_reg = alloc_preg (cfg);
4832 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4833 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface case: proxies are checked against their remote proxy_class. */
4835 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4836 tmp_reg = alloc_preg (cfg);
4837 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4838 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4840 tmp_reg = alloc_preg (cfg);
4841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4842 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4843 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4845 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4846 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4848 MONO_START_BB (cfg, no_proxy_bb);
4850 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4852 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the three possible integer results and converge at end_bb. */
4856 MONO_START_BB (cfg, false_bb);
4858 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4859 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4861 #ifndef DISABLE_REMOTING
4862 MONO_START_BB (cfg, false2_bb);
4864 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4865 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4868 MONO_START_BB (cfg, true_bb);
4870 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4872 MONO_START_BB (cfg, end_bb);
4875 MONO_INST_NEW (cfg, ins, OP_ICONST);
4877 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant (result encoding in the
 * comment below).  When remoting is disabled the proxy paths are compiled out
 * and hitting this code aborts.
 */
4883 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4885 /* This opcode takes as input an object reference and a class, and returns:
4886 0) if the object is an instance of the class,
4887 1) if the object is a proxy whose type cannot be determined
4888 an InvalidCastException exception is thrown otherwise*/
4891 #ifndef DISABLE_REMOTING
4892 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4894 MonoBasicBlock *ok_result_bb;
4896 int obj_reg = src->dreg;
4897 int dreg = alloc_ireg (cfg);
4898 int tmp_reg = alloc_preg (cfg);
4900 #ifndef DISABLE_REMOTING
4901 int klass_reg = alloc_preg (cfg);
4902 NEW_BBLOCK (cfg, end_bb);
4905 NEW_BBLOCK (cfg, ok_result_bb);
/* A null object always casts successfully (result 0). */
4907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4910 save_cast_details (cfg, klass, obj_reg, FALSE);
4912 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4913 #ifndef DISABLE_REMOTING
4914 NEW_BBLOCK (cfg, interface_fail_bb);
4916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a proxy with no custom type info may pass
 * (result 1); any other object throws InvalidCastException. */
4917 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4918 MONO_START_BB (cfg, interface_fail_bb);
4919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4921 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4923 tmp_reg = alloc_preg (cfg);
4924 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4925 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4926 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4928 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4929 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4932 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4933 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4936 #ifndef DISABLE_REMOTING
4937 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: proxies are checked against their remote proxy_class. */
4939 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4941 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4943 tmp_reg = alloc_preg (cfg);
4944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4945 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4947 tmp_reg = alloc_preg (cfg);
4948 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4949 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4952 NEW_BBLOCK (cfg, fail_1_bb);
4954 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4956 MONO_START_BB (cfg, fail_1_bb);
4958 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4959 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4961 MONO_START_BB (cfg, no_proxy_bb);
4963 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4965 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4969 MONO_START_BB (cfg, ok_result_bb);
4971 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4973 #ifndef DISABLE_REMOTING
4974 MONO_START_BB (cfg, end_bb);
4978 MONO_INST_NEW (cfg, ins, OP_ICONST);
4980 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit inline IR for Enum.HasFlag: (*enum_this & enum_flag) == enum_flag,
 * using 32-bit or 64-bit ops depending on the enum's underlying type.
 */
4985 static G_GNUC_UNUSED MonoInst*
4986 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4988 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4989 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4992 switch (enum_type->type) {
4995 #if SIZEOF_REGISTER == 8
5007 MonoInst *load, *and_, *cmp, *ceq;
5008 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5009 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
5010 int dest_reg = alloc_ireg (cfg);
/* load value, AND with the flag, then compare the result back to the flag */
5012 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
5013 EMIT_NEW_BIALU (cfg, and_, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
5014 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
5015 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
5017 ceq->type = STACK_I4;
/* decompose the composite opcodes for back ends that need it */
5020 load = mono_decompose_opcode (cfg, load);
5021 and_ = mono_decompose_opcode (cfg, and_);
5022 cmp = mono_decompose_opcode (cfg, cmp);
5023 ceq = mono_decompose_opcode (cfg, ceq);
5031 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR allocating a delegate of type KLASS bound to METHOD with the
 * given TARGET, inlining the work normally done by mono_delegate_ctor ().
 * VIRTUAL_ selects the virtual-invoke trampoline path.
 */
5033 static G_GNUC_UNUSED MonoInst*
5034 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual_)
5038 gpointer trampoline;
5039 MonoInst *obj, *method_ins, *tramp_ins;
5043 if (virtual_ && !cfg->llvm_only) {
5044 MonoMethod *invoke = mono_get_delegate_invoke (klass);
5047 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
5051 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
5055 /* Inline the contents of mono_delegate_ctor */
5057 /* Set target field */
5058 /* Optimize away setting of NULL target */
5059 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
5060 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* reference store into a heap object needs a GC write barrier */
5061 if (cfg->gen_write_barriers) {
5062 dreg = alloc_preg (cfg);
5063 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
5064 emit_write_barrier (cfg, ptr, target);
5068 /* Set method field */
5069 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5073 * To avoid looking up the compiled code belonging to the target method
5074 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5075 * store it, and we fill it after the method has been compiled.
5077 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5078 MonoInst *code_slot_ins;
5081 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* lazily create and populate the per-domain method -> code-slot hash */
5083 domain = mono_domain_get ();
5084 mono_domain_lock (domain);
5085 if (!domain_jit_info (domain)->method_code_hash)
5086 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5087 code_slot = (guint8 **)g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5089 code_slot = (guint8 **)mono_domain_alloc0 (domain, sizeof (gpointer));
5090 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5092 mono_domain_unlock (domain);
5094 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5096 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* llvm-only mode initializes the delegate through jit icalls instead of trampolines */
5099 if (cfg->llvm_only) {
5100 MonoInst *args [16];
5105 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5106 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate_virtual, args);
5109 mono_emit_jit_icall (cfg, mono_llvmonly_init_delegate, args);
5115 if (cfg->compile_aot) {
5116 MonoDelegateClassMethodPair *del_tramp;
5118 del_tramp = (MonoDelegateClassMethodPair *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5119 del_tramp->klass = klass;
5120 del_tramp->method = context_used ? NULL : method;
5121 del_tramp->is_virtual = virtual_;
5122 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5125 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5127 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5128 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5131 /* Set invoke_impl field */
5133 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
5135 dreg = alloc_preg (cfg);
5136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5137 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5139 dreg = alloc_preg (cfg);
5140 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5141 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
5144 dreg = alloc_preg (cfg);
5145 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual_ ? 1 : 0);
5146 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5148 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the vararg mono_array_new_va icall for a RANK-rank
 * 'newobj' array constructor; SP holds the dimension/bound arguments.
 */
5154 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5156 MonoJitICallInfo *info;
5158 /* Need to register the icall so it gets an icall wrapper */
5159 info = mono_get_array_new_va_icall (rank);
5161 cfg->flags |= MONO_CFG_HAS_VARARGS;
5163 /* mono_array_new_va () needs a vararg calling convention */
5164 cfg->exception_message = g_strdup ("array-new");
5165 cfg->disable_llvm = TRUE;
5167 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5168 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5172 * handle_constrained_gsharedvt_call:
5174 * Handle constrained calls where the receiver is a gsharedvt type.
5175 * Return the instruction representing the call. Set the cfg exception on failure.
5178 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5179 gboolean *ref_emit_widen)
5181 MonoInst *ins = NULL;
5182 gboolean emit_widen = *ref_emit_widen;
5185 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
5186 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5187 * pack the arguments into an array, and do the rest of the work in an icall.
5189 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5190 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5191 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5192 MonoInst *args [16];
5195 * This case handles calls to
5196 * - object:ToString()/Equals()/GetHashCode(),
5197 * - System.IComparable<T>:CompareTo()
5198 * - System.IEquatable<T>:Equals ()
5199 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method, either from the rgctx (shared) or as a constant */
5203 if (mono_method_check_context_used (cmethod))
5204 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5206 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5207 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5209 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5210 if (fsig->hasthis && fsig->param_count) {
5211 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5212 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5213 ins->dreg = alloc_preg (cfg);
5214 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5215 MONO_ADD_INS (cfg->cbb, ins);
5218 if (mini_is_gsharedvt_type (fsig->params [0])) {
5219 int addr_reg, deref_arg_reg;
5221 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5222 deref_arg_reg = alloc_preg (cfg);
5223 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5224 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5226 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5227 addr_reg = ins->dreg;
5228 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5230 EMIT_NEW_ICONST (cfg, args [3], 0);
5231 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5234 EMIT_NEW_ICONST (cfg, args [3], 0);
5235 EMIT_NEW_ICONST (cfg, args [4], 0);
5237 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* the icall returns a boxed result; unbox it to match the declared return type */
5240 if (mini_is_gsharedvt_type (fsig->ret)) {
5241 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5242 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
5246 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5247 MONO_ADD_INS (cfg->cbb, add);
5249 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5250 MONO_ADD_INS (cfg->cbb, ins);
5251 /* ins represents the call result */
5254 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5257 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Ensure the GOT address is loaded into cfg->got_var at the start of the
 * method.  No-op if there is no got_var or it was already allocated.
 */
5266 mono_emit_load_got_addr (MonoCompile *cfg)
5268 MonoInst *getaddr, *dummy_use;
5270 if (!cfg->got_var || cfg->got_var_allocated)
5273 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5274 getaddr->cil_code = cfg->header->code;
5275 getaddr->dreg = cfg->got_var->dreg;
5277 /* Add it to the start of the first bblock */
5278 if (cfg->bb_entry->code) {
5279 getaddr->next = cfg->bb_entry->code;
5280 cfg->bb_entry->code = getaddr;
5283 MONO_ADD_INS (cfg->bb_entry, getaddr);
5285 cfg->got_var_allocated = TRUE;
5288 * Add a dummy use to keep the got_var alive, since real uses might
5289 * only be generated by the back ends.
5290 * Add it to end_bblock, so the variable's lifetime covers the whole
5292 * It would be better to make the usage of the got var explicit in all
5293 * cases when the backend needs it (i.e. calls, throw etc.), so this
5294 * wouldn't be needed.
5296 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5297 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit in IL bytes; initialized lazily from the MONO_INLINELIMIT
 * environment variable (falling back to INLINE_LENGTH_LIMIT). */
5300 static int inline_limit;
5301 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG, checking inlining attributes, code size, class-initialization
 * constraints, and soft-float restrictions.
 */
5304 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5306 MonoMethodHeaderSummary header;
5308 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5309 MonoMethodSignature *sig = mono_method_signature (method);
5313 if (cfg->disable_inline)
/* cap recursion depth of nested inlining */
5318 if (cfg->inline_depth > 10)
5321 if (!mono_method_get_header_summary (method, &header))
5324 /*runtime, icall and pinvoke are checked by summary call*/
5325 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5326 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5327 (mono_class_is_marshalbyref (method->klass)) ||
5331 /* also consider num_locals? */
5332 /* Do the size check early to avoid creating vtables */
5333 if (!inline_limit_inited) {
5334 if (g_getenv ("MONO_INLINELIMIT"))
5335 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
5337 inline_limit = INLINE_LENGTH_LIMIT;
5338 inline_limit_inited = TRUE;
5340 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5344 * if we can initialize the class of the method right away, we do,
5345 * otherwise we don't allow inlining if the class needs initialization,
5346 * since it would mean inserting a call to mono_runtime_class_init()
5347 * inside the inlined code
5349 if (!(cfg->opt & MONO_OPT_SHARED)) {
5350 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5351 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5352 vtable = mono_class_vtable (cfg->domain, method->klass);
5355 if (!cfg->compile_aot) {
5357 if (!mono_runtime_class_init_full (vtable, &error)) {
5358 mono_error_cleanup (&error);
5362 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5363 if (cfg->run_cctors && method->klass->has_cctor) {
5364 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5365 if (!method->klass->runtime_info)
5366 /* No vtable created yet */
5368 vtable = mono_class_vtable (cfg->domain, method->klass);
5371 /* This makes so that inline cannot trigger */
5372 /* .cctors: too many apps depend on them */
5373 /* running with a specific order... */
5374 if (! vtable->initialized)
5377 if (!mono_runtime_class_init_full (vtable, &error)) {
5378 mono_error_cleanup (&error);
5382 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5383 if (!method->klass->runtime_info)
5384 /* No vtable created yet */
5386 vtable = mono_class_vtable (cfg->domain, method->klass);
5389 if (!vtable->initialized)
5394 * If we're compiling for shared code
5395 * the cctor will need to be run at aot method load time, for example,
5396 * or at the end of the compilation of the inlining method.
5398 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* soft-float targets cannot inline methods taking/returning R4 values */
5402 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5403 if (mono_arch_is_soft_float ()) {
5405 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5407 for (i = 0; i < sig->param_count; ++i)
5408 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5413 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD on KLASS requires emitting
 * a class-initialization (cctor) call before the access.
 */
5420 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5422 if (!cfg->compile_aot) {
5424 if (vtable->initialized)
5428 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5429 if (cfg->method == method)
5433 if (!mono_class_needs_cctor_run (klass, method))
5436 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5437 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS.  BCHECK enables the bounds check.
 */
5444 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5448 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5451 if (mini_is_gsharedvt_variable_klass (klass)) {
5454 mono_class_init (klass);
5455 size = mono_class_array_element_size (klass);
5458 mult_reg = alloc_preg (cfg);
5459 array_reg = arr->dreg;
5460 index_reg = index->dreg;
5462 #if SIZEOF_REGISTER == 8
5463 /* The array reg is 64 bits but the index reg is only 32 */
5464 if (COMPILE_LLVM (cfg)) {
5466 index2_reg = index_reg;
5468 index2_reg = alloc_preg (cfg);
5469 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5472 if (index->type == STACK_I8) {
5473 index2_reg = alloc_preg (cfg);
5474 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5476 index2_reg = index_reg;
5481 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: power-of-two element sizes fold into a single LEA */
5483 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5484 if (size == 1 || size == 2 || size == 4 || size == 8) {
5485 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5487 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5488 ins->klass = mono_class_get_element_class (klass);
5489 ins->type = STACK_MP;
/* generic path: addr = arr + index * elem_size + offsetof (vector) */
5495 add_reg = alloc_ireg_mp (cfg);
5498 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime, fetch it from the rgctx */
5501 g_assert (cfg->gshared);
5502 context_used = mini_class_check_context_used (cfg, klass);
5503 g_assert (context_used);
5504 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5505 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5509 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5510 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5511 ins->klass = mono_class_get_element_class (klass);
5512 ins->type = STACK_MP;
5513 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR with element class KLASS, including per-dimension
 * lower-bound adjustment and range checks.
 */
5519 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5521 int bounds_reg = alloc_preg (cfg);
5522 int add_reg = alloc_ireg_mp (cfg);
5523 int mult_reg = alloc_preg (cfg);
5524 int mult2_reg = alloc_preg (cfg);
5525 int low1_reg = alloc_preg (cfg);
5526 int low2_reg = alloc_preg (cfg);
5527 int high1_reg = alloc_preg (cfg);
5528 int high2_reg = alloc_preg (cfg);
5529 int realidx1_reg = alloc_preg (cfg);
5530 int realidx2_reg = alloc_preg (cfg);
5531 int sum_reg = alloc_preg (cfg);
5532 int index1, index2, tmpreg;
5536 mono_class_init (klass);
5537 size = mono_class_array_element_size (klass);
5539 index1 = index_ins1->dreg;
5540 index2 = index_ins2->dreg;
5542 #if SIZEOF_REGISTER == 8
5543 /* The array reg is 64 bits but the index reg is only 32 */
5544 if (COMPILE_LLVM (cfg)) {
5547 tmpreg = alloc_preg (cfg);
5548 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5550 tmpreg = alloc_preg (cfg);
5551 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5555 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5559 /* range checking */
5560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5561 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* realidx = index - lower_bound; unsigned compare against length covers both
 * the below-lower-bound and above-upper-bound cases in one check */
5563 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5564 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5565 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5566 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5567 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5568 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5569 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
5571 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5572 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5573 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5575 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5576 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5577 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * dim2_length + realidx2) * elem_size) + vector offset */
5579 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5580 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5582 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5583 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5585 ins->type = STACK_MP;
5587 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of the array element accessed by CMETHOD
 * (a System.Array accessor; IS_SET marks a setter, which carries a trailing
 * value argument that is not an index). Rank 1 and — when the fast path is
 * usable — rank 2 are inlined; otherwise a call to the marshal-generated
 * array-address wrapper is emitted.
 * NOTE(review): several original source lines are elided in this extract.
 */
5593 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5597 MonoMethod *addr_method;
5599 MonoClass *eclass = cmethod->klass->element_class;
/* Setters have one trailing non-index (value) parameter */
5601 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5604 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5606 /* emit_ldelema_2 depends on OP_LMUL */
5607 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5608 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
/* Fallback: call the generated array-address helper */
5611 if (mini_is_gsharedvt_variable_klass (eclass))
5614 element_size = mono_class_array_element_size (eclass);
5615 addr_method = mono_marshal_get_array_address (rank, element_size);
5616 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor break IL opcodes / Debugger.Break () calls. */
5621 static MonoBreakPolicy
5622 always_insert_breakpoint (MonoMethod *method)
5624 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break-policy callback; replaced via mono_set_break_policy (). */
5627 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5630 * mono_set_break_policy:
5631 * policy_callback: the new callback function
5633 * Allow embedders to decide whether to actually obey breakpoint instructions
5634 * (both break IL instructions and Debugger.Break () method calls), for example
5635 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5636 * untrusted or semi-trusted code.
5638 * @policy_callback will be called every time a break point instruction needs to
5639 * be inserted with the method argument being the method that calls Debugger.Break()
5640 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5641 * if it wants the breakpoint to not be effective in the given method.
5642 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; see the comment above for semantics. */
5645 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5647 if (policy_callback)
5648 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy (the else keyword is on an elided line of this extract) */
5650 break_policy_func = always_insert_breakpoint;
/*
 * Consult the installed break policy to decide whether METHOD gets an actual
 * breakpoint. NOTE: the misspelling "brekpoint" is this function's real
 * internal name; renaming it would break callers elsewhere in the file.
 */
5654 should_insert_brekpoint (MonoMethod *method) {
5655 switch (break_policy_func (method)) {
5656 case MONO_BREAK_POLICY_ALWAYS:
5658 case MONO_BREAK_POLICY_NEVER:
5660 case MONO_BREAK_POLICY_ON_DBG:
/* mdb (the old Mono debugger) was removed, so ON_DBG only warns */
5661 g_warning ("mdb no longer supported");
5664 g_warning ("Incorrect value returned from break policy callback");
5669 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline GetGenericValueImpl (is_set == FALSE) / SetGenericValueImpl
 * (is_set == TRUE) as a load/store pair through the element address.
 * args [0] = array, args [1] = index, args [2] = pointer to the value.
 */
5671 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5673 MonoInst *addr, *store, *load;
5674 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5676 /* the bounds check is already done by the callers */
5677 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set path: copy *args [2] into the array slot */
5679 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5680 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5681 if (mini_type_is_reference (&eklass->byval_arg))
/* storing a reference into the heap requires a GC write barrier */
5682 emit_write_barrier (cfg, addr, load);
/* get path: copy the array slot into *args [2] */
5684 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5685 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is represented as a reference at runtime (delegates to mini_type_is_reference). */
5692 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5694 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing into an array of KLASS: sp [0] = array,
 * sp [1] = index, sp [2] = value. With SAFETY_CHECKS, reference-element
 * stores (except a known null constant) go through the virtual stelemref
 * helper, which performs the array covariance check.
 * NOTE(review): several original source lines are elided in this extract.
 */
5698 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5700 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5701 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5702 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5703 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5704 MonoInst *iargs [3];
/* The helper is invoked virtually, so its vtable slot must be set up */
5707 mono_class_setup_vtable (obj_array);
5708 g_assert (helper->slot);
5710 if (sp [0]->type != STACK_OBJ)
5712 if (sp [2]->type != STACK_OBJ)
5719 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Variable-size (gsharedvt) element: store through the computed address */
5723 if (mini_is_gsharedvt_variable_klass (klass)) {
5726 // FIXME-VT: OP_ICONST optimization
5727 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5728 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5729 ins->opcode = OP_STOREV_MEMBASE;
5730 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset directly into the store */
5731 int array_reg = sp [0]->dreg;
5732 int index_reg = sp [1]->dreg;
5733 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5735 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
5736 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
5739 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5740 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, then store through it */
5742 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5743 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5744 if (generic_class_is_reference_type (cfg, klass))
5745 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore (is_set) / Array.UnsafeLoad: an array element
 * access with safety checks disabled (the FALSE arguments below).
 */
5752 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Element class comes from the value parameter (store) or the return type (load) */
5757 eklass = mono_class_from_mono_type (fsig->params [2]);
5759 eklass = mono_class_from_mono_type (fsig->ret);
5762 return emit_array_store (cfg, eklass, args, FALSE);
5764 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5765 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether Array.UnsafeMov<S,R> can be compiled as a plain move from
 * PARAM_KLASS to RETURN_KLASS: no value-type/reference-type mixing, no GC
 * references, no struct/scalar mixing, no floats, and either equal sizes or
 * both values fitting in a 32-bit register.
 * FIX(review): repaired mojibake — "&param_klass" had been collapsed into
 * "¶m_klass" (U+00B6) on four lines; the sibling "&return_klass" usages
 * confirm the intended token. No other token changed.
 * NOTE(review): several original source lines are elided in this extract.
 */
5771 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5774 int param_size, return_size;
/* Strip enums / generic-sharing wrappers down to the underlying classes */
5776 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5777 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5779 if (cfg->verbose_level > 3)
5780 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5782 //Don't allow mixing reference types with value types
5783 if (param_klass->valuetype != return_klass->valuetype) {
5784 if (cfg->verbose_level > 3)
5785 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5789 if (!param_klass->valuetype) {
5790 if (cfg->verbose_level > 3)
5791 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
/* Types containing GC references cannot be blindly reinterpreted */
5796 if (param_klass->has_references || return_klass->has_references)
5799 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5800 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5801 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5802 if (cfg->verbose_level > 3)
5803 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
/* Floats live in different registers and are not supported here */
5807 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5808 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5809 if (cfg->verbose_level > 3)
5810 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5814 param_size = mono_class_value_size (param_klass, &align);
5815 return_size = mono_class_value_size (return_klass, &align);
5817 //We can do it if sizes match
5818 if (param_size == return_size) {
5819 if (cfg->verbose_level > 3)
5820 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5824 //No simple way to handle struct if sizes don't match
5825 if (MONO_TYPE_ISSTRUCT (&param_klass->byval_arg)) {
5826 if (cfg->verbose_level > 3)
5827 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5832 * Same reg size category.
5833 * A quick note on why we don't require widening here.
5834 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5836 * Since the source value comes from a function argument, the JIT will already have
5837 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5839 if (param_size <= 4 && return_size <= 4) {
5840 if (cfg->verbose_level > 3)
5841 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov<S,R> as a plain move when the parameter and
 * return types are layout-compatible (see is_unsafe_mov_compatible), either
 * directly or as rank-1 arrays of compatible element types.
 */
5849 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5851 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5852 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
/* Variable-size (gsharedvt) return types cannot be moved directly */
5854 if (mini_is_gsharedvt_variable_type (fsig->ret))
5857 //Valuetypes that are semantically equivalent or numbers than can be widened to
5858 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5861 //Arrays of valuetypes that are semantically equivalent
5862 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR: SIMD ctors when
 * the backend defines MONO_ARCH_SIMD_INTRINSICS and MONO_OPT_SIMD is on,
 * then the native-types intrinsics. Returns NULL when nothing applies.
 */
5869 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5871 #ifdef MONO_ARCH_SIMD_INTRINSICS
5872 MonoInst *ins = NULL;
5874 if (cfg->opt & MONO_OPT_SIMD) {
5875 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5881 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER instruction of the given MONO_MEMORY_BARRIER_* kind. */
5885 emit_memory_barrier (MonoCompile *cfg, int kind)
5887 MonoInst *ins = NULL;
5888 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5889 MONO_ADD_INS (cfg->cbb, ins);
/* The barrier strength is carried in the backend-specific field */
5890 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics emitted only for the LLVM backend: System.Math Sin/Cos/Sqrt
 * and Abs (double only) become single-sreg opcodes; Min/Max become the
 * IMIN/IMAX opcode family when MONO_OPT_CMOV is enabled. Returns NULL when
 * no intrinsic applies.
 * NOTE(review): several original source lines are elided in this extract.
 */
5896 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5898 MonoInst *ins = NULL;
5901 /* The LLVM backend supports these intrinsics */
5902 if (cmethod->klass == mono_defaults.math_class) {
5903 if (strcmp (cmethod->name, "Sin") == 0) {
5905 } else if (strcmp (cmethod->name, "Cos") == 0) {
5907 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for doubles */
5909 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5913 if (opcode && fsig->param_count == 1) {
5914 MONO_INST_NEW (cfg, ins, opcode);
5915 ins->type = STACK_R8;
5916 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5917 ins->sreg1 = args [0]->dreg;
5918 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max are only worthwhile when conditional moves are available */
5922 if (cfg->opt & MONO_OPT_CMOV) {
5923 if (strcmp (cmethod->name, "Min") == 0) {
5924 if (fsig->params [0]->type == MONO_TYPE_I4)
5926 if (fsig->params [0]->type == MONO_TYPE_U4)
5927 opcode = OP_IMIN_UN;
5928 else if (fsig->params [0]->type == MONO_TYPE_I8)
5930 else if (fsig->params [0]->type == MONO_TYPE_U8)
5931 opcode = OP_LMIN_UN;
5932 } else if (strcmp (cmethod->name, "Max") == 0) {
5933 if (fsig->params [0]->type == MONO_TYPE_I4)
5935 if (fsig->params [0]->type == MONO_TYPE_U4)
5936 opcode = OP_IMAX_UN;
5937 else if (fsig->params [0]->type == MONO_TYPE_I8)
5939 else if (fsig->params [0]->type == MONO_TYPE_U8)
5940 opcode = OP_LMAX_UN;
5944 if (opcode && fsig->param_count == 2) {
5945 MONO_INST_NEW (cfg, ins, opcode);
5946 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5947 ins->dreg = mono_alloc_dreg (cfg, ins->type);
5948 ins->sreg1 = args [0]->dreg;
5949 ins->sreg2 = args [1]->dreg;
5950 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics safe to apply in shared (gshared) code: the System.Array
 * UnsafeStore/UnsafeLoad/UnsafeMov helpers. Returns NULL when the method
 * is not one of these.
 */
5958 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5960 if (cmethod->klass == mono_defaults.array_class) {
5961 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5962 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5963 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5964 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5965 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5966 return emit_array_unsafe_mov (cfg, fsig, args);
5973 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5975 MonoInst *ins = NULL;
5977 MonoClass *runtime_helpers_class = mono_class_get_runtime_helpers_class ();
5979 if (cmethod->klass == mono_defaults.string_class) {
5980 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5981 int dreg = alloc_ireg (cfg);
5982 int index_reg = alloc_preg (cfg);
5983 int add_reg = alloc_preg (cfg);
5985 #if SIZEOF_REGISTER == 8
5986 if (COMPILE_LLVM (cfg)) {
5987 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, args [1]->dreg);
5989 /* The array reg is 64 bits but the index reg is only 32 */
5990 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5993 index_reg = args [1]->dreg;
5995 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5997 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5998 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5999 add_reg = ins->dreg;
6000 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
6003 int mult_reg = alloc_preg (cfg);
6004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
6005 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
6006 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
6007 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
6009 type_from_op (cfg, ins, NULL, NULL);
6011 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6012 int dreg = alloc_ireg (cfg);
6013 /* Decompose later to allow more optimizations */
6014 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
6015 ins->type = STACK_I4;
6016 ins->flags |= MONO_INST_FAULT;
6017 cfg->cbb->has_array_access = TRUE;
6018 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
6023 } else if (cmethod->klass == mono_defaults.object_class) {
6024 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
6025 int dreg = alloc_ireg_ref (cfg);
6026 int vt_reg = alloc_preg (cfg);
6027 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6028 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
6029 type_from_op (cfg, ins, NULL, NULL);
6032 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
6033 int dreg = alloc_ireg (cfg);
6034 int t1 = alloc_ireg (cfg);
6036 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
6037 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
6038 ins->type = STACK_I4;
6041 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
6042 MONO_INST_NEW (cfg, ins, OP_NOP);
6043 MONO_ADD_INS (cfg->cbb, ins);
6047 } else if (cmethod->klass == mono_defaults.array_class) {
6048 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6049 return emit_array_generic_access (cfg, fsig, args, FALSE);
6050 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
6051 return emit_array_generic_access (cfg, fsig, args, TRUE);
6053 #ifndef MONO_BIG_ARRAYS
6055 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
6058 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
6059 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
6060 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
6061 int dreg = alloc_ireg (cfg);
6062 int bounds_reg = alloc_ireg_mp (cfg);
6063 MonoBasicBlock *end_bb, *szarray_bb;
6064 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
6066 NEW_BBLOCK (cfg, end_bb);
6067 NEW_BBLOCK (cfg, szarray_bb);
6069 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
6070 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
6071 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
6072 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
6073 /* Non-szarray case */
6075 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6076 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
6078 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6079 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
6080 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
6081 MONO_START_BB (cfg, szarray_bb);
6084 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6085 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6087 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6088 MONO_START_BB (cfg, end_bb);
6090 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
6091 ins->type = STACK_I4;
6097 if (cmethod->name [0] != 'g')
6100 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6101 int dreg = alloc_ireg (cfg);
6102 int vtable_reg = alloc_preg (cfg);
6103 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6104 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6105 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6106 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6107 type_from_op (cfg, ins, NULL, NULL);
6110 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6111 int dreg = alloc_ireg (cfg);
6113 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6114 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6115 type_from_op (cfg, ins, NULL, NULL);
6120 } else if (cmethod->klass == runtime_helpers_class) {
6121 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6122 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6126 } else if (cmethod->klass == mono_defaults.monitor_class) {
6127 gboolean is_enter = FALSE;
6128 gboolean is_v4 = FALSE;
6130 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 2 && fsig->params [1]->byref) {
6134 if (!strcmp (cmethod->name, "Enter") && fsig->param_count == 1)
6139 * To make async stack traces work, icalls which can block should have a wrapper.
6140 * For Monitor.Enter, emit two calls: a fastpath which doesn't have a wrapper, and a slowpath, which does.
6142 MonoBasicBlock *end_bb;
6144 NEW_BBLOCK (cfg, end_bb);
6146 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4_fast : (gpointer)mono_monitor_enter_fast, args);
6147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, ins->dreg, 0);
6148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, end_bb);
6149 ins = mono_emit_jit_icall (cfg, is_v4 ? (gpointer)mono_monitor_enter_v4 : (gpointer)mono_monitor_enter, args);
6150 MONO_START_BB (cfg, end_bb);
6153 } else if (cmethod->klass == mono_defaults.thread_class) {
6154 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6155 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6156 MONO_ADD_INS (cfg->cbb, ins);
6158 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6159 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6160 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6162 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6164 if (fsig->params [0]->type == MONO_TYPE_I1)
6165 opcode = OP_LOADI1_MEMBASE;
6166 else if (fsig->params [0]->type == MONO_TYPE_U1)
6167 opcode = OP_LOADU1_MEMBASE;
6168 else if (fsig->params [0]->type == MONO_TYPE_I2)
6169 opcode = OP_LOADI2_MEMBASE;
6170 else if (fsig->params [0]->type == MONO_TYPE_U2)
6171 opcode = OP_LOADU2_MEMBASE;
6172 else if (fsig->params [0]->type == MONO_TYPE_I4)
6173 opcode = OP_LOADI4_MEMBASE;
6174 else if (fsig->params [0]->type == MONO_TYPE_U4)
6175 opcode = OP_LOADU4_MEMBASE;
6176 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6177 opcode = OP_LOADI8_MEMBASE;
6178 else if (fsig->params [0]->type == MONO_TYPE_R4)
6179 opcode = OP_LOADR4_MEMBASE;
6180 else if (fsig->params [0]->type == MONO_TYPE_R8)
6181 opcode = OP_LOADR8_MEMBASE;
6182 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6183 opcode = OP_LOAD_MEMBASE;
6186 MONO_INST_NEW (cfg, ins, opcode);
6187 ins->inst_basereg = args [0]->dreg;
6188 ins->inst_offset = 0;
6189 MONO_ADD_INS (cfg->cbb, ins);
6191 switch (fsig->params [0]->type) {
6198 ins->dreg = mono_alloc_ireg (cfg);
6199 ins->type = STACK_I4;
6203 ins->dreg = mono_alloc_lreg (cfg);
6204 ins->type = STACK_I8;
6208 ins->dreg = mono_alloc_ireg (cfg);
6209 #if SIZEOF_REGISTER == 8
6210 ins->type = STACK_I8;
6212 ins->type = STACK_I4;
6217 ins->dreg = mono_alloc_freg (cfg);
6218 ins->type = STACK_R8;
6221 g_assert (mini_type_is_reference (fsig->params [0]));
6222 ins->dreg = mono_alloc_ireg_ref (cfg);
6223 ins->type = STACK_OBJ;
6227 if (opcode == OP_LOADI8_MEMBASE)
6228 ins = mono_decompose_opcode (cfg, ins);
6230 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6234 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6236 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6238 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6239 opcode = OP_STOREI1_MEMBASE_REG;
6240 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6241 opcode = OP_STOREI2_MEMBASE_REG;
6242 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6243 opcode = OP_STOREI4_MEMBASE_REG;
6244 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6245 opcode = OP_STOREI8_MEMBASE_REG;
6246 else if (fsig->params [0]->type == MONO_TYPE_R4)
6247 opcode = OP_STORER4_MEMBASE_REG;
6248 else if (fsig->params [0]->type == MONO_TYPE_R8)
6249 opcode = OP_STORER8_MEMBASE_REG;
6250 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6251 opcode = OP_STORE_MEMBASE_REG;
6254 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6256 MONO_INST_NEW (cfg, ins, opcode);
6257 ins->sreg1 = args [1]->dreg;
6258 ins->inst_destbasereg = args [0]->dreg;
6259 ins->inst_offset = 0;
6260 MONO_ADD_INS (cfg->cbb, ins);
6262 if (opcode == OP_STOREI8_MEMBASE_REG)
6263 ins = mono_decompose_opcode (cfg, ins);
6268 } else if (cmethod->klass->image == mono_defaults.corlib &&
6269 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6270 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6273 #if SIZEOF_REGISTER == 8
6274 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6275 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6276 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6277 ins->dreg = mono_alloc_preg (cfg);
6278 ins->sreg1 = args [0]->dreg;
6279 ins->type = STACK_I8;
6280 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6281 MONO_ADD_INS (cfg->cbb, ins);
6285 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6287 /* 64 bit reads are already atomic */
6288 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6289 load_ins->dreg = mono_alloc_preg (cfg);
6290 load_ins->inst_basereg = args [0]->dreg;
6291 load_ins->inst_offset = 0;
6292 load_ins->type = STACK_I8;
6293 MONO_ADD_INS (cfg->cbb, load_ins);
6295 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6302 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6303 MonoInst *ins_iconst;
6306 if (fsig->params [0]->type == MONO_TYPE_I4) {
6307 opcode = OP_ATOMIC_ADD_I4;
6308 cfg->has_atomic_add_i4 = TRUE;
6310 #if SIZEOF_REGISTER == 8
6311 else if (fsig->params [0]->type == MONO_TYPE_I8)
6312 opcode = OP_ATOMIC_ADD_I8;
6315 if (!mono_arch_opcode_supported (opcode))
6317 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6318 ins_iconst->inst_c0 = 1;
6319 ins_iconst->dreg = mono_alloc_ireg (cfg);
6320 MONO_ADD_INS (cfg->cbb, ins_iconst);
6322 MONO_INST_NEW (cfg, ins, opcode);
6323 ins->dreg = mono_alloc_ireg (cfg);
6324 ins->inst_basereg = args [0]->dreg;
6325 ins->inst_offset = 0;
6326 ins->sreg2 = ins_iconst->dreg;
6327 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6328 MONO_ADD_INS (cfg->cbb, ins);
6330 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6331 MonoInst *ins_iconst;
6334 if (fsig->params [0]->type == MONO_TYPE_I4) {
6335 opcode = OP_ATOMIC_ADD_I4;
6336 cfg->has_atomic_add_i4 = TRUE;
6338 #if SIZEOF_REGISTER == 8
6339 else if (fsig->params [0]->type == MONO_TYPE_I8)
6340 opcode = OP_ATOMIC_ADD_I8;
6343 if (!mono_arch_opcode_supported (opcode))
6345 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6346 ins_iconst->inst_c0 = -1;
6347 ins_iconst->dreg = mono_alloc_ireg (cfg);
6348 MONO_ADD_INS (cfg->cbb, ins_iconst);
6350 MONO_INST_NEW (cfg, ins, opcode);
6351 ins->dreg = mono_alloc_ireg (cfg);
6352 ins->inst_basereg = args [0]->dreg;
6353 ins->inst_offset = 0;
6354 ins->sreg2 = ins_iconst->dreg;
6355 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6356 MONO_ADD_INS (cfg->cbb, ins);
6358 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6361 if (fsig->params [0]->type == MONO_TYPE_I4) {
6362 opcode = OP_ATOMIC_ADD_I4;
6363 cfg->has_atomic_add_i4 = TRUE;
6365 #if SIZEOF_REGISTER == 8
6366 else if (fsig->params [0]->type == MONO_TYPE_I8)
6367 opcode = OP_ATOMIC_ADD_I8;
6370 if (!mono_arch_opcode_supported (opcode))
6372 MONO_INST_NEW (cfg, ins, opcode);
6373 ins->dreg = mono_alloc_ireg (cfg);
6374 ins->inst_basereg = args [0]->dreg;
6375 ins->inst_offset = 0;
6376 ins->sreg2 = args [1]->dreg;
6377 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6378 MONO_ADD_INS (cfg->cbb, ins);
6381 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6382 MonoInst *f2i = NULL, *i2f;
6383 guint32 opcode, f2i_opcode, i2f_opcode;
6384 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6385 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6387 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6388 fsig->params [0]->type == MONO_TYPE_R4) {
6389 opcode = OP_ATOMIC_EXCHANGE_I4;
6390 f2i_opcode = OP_MOVE_F_TO_I4;
6391 i2f_opcode = OP_MOVE_I4_TO_F;
6392 cfg->has_atomic_exchange_i4 = TRUE;
6394 #if SIZEOF_REGISTER == 8
6396 fsig->params [0]->type == MONO_TYPE_I8 ||
6397 fsig->params [0]->type == MONO_TYPE_R8 ||
6398 fsig->params [0]->type == MONO_TYPE_I) {
6399 opcode = OP_ATOMIC_EXCHANGE_I8;
6400 f2i_opcode = OP_MOVE_F_TO_I8;
6401 i2f_opcode = OP_MOVE_I8_TO_F;
6404 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6405 opcode = OP_ATOMIC_EXCHANGE_I4;
6406 cfg->has_atomic_exchange_i4 = TRUE;
6412 if (!mono_arch_opcode_supported (opcode))
6416 /* TODO: Decompose these opcodes instead of bailing here. */
6417 if (COMPILE_SOFT_FLOAT (cfg))
6420 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6421 f2i->dreg = mono_alloc_ireg (cfg);
6422 f2i->sreg1 = args [1]->dreg;
6423 if (f2i_opcode == OP_MOVE_F_TO_I4)
6424 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6425 MONO_ADD_INS (cfg->cbb, f2i);
6428 MONO_INST_NEW (cfg, ins, opcode);
6429 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6430 ins->inst_basereg = args [0]->dreg;
6431 ins->inst_offset = 0;
6432 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6433 MONO_ADD_INS (cfg->cbb, ins);
6435 switch (fsig->params [0]->type) {
6437 ins->type = STACK_I4;
6440 ins->type = STACK_I8;
6443 #if SIZEOF_REGISTER == 8
6444 ins->type = STACK_I8;
6446 ins->type = STACK_I4;
6451 ins->type = STACK_R8;
6454 g_assert (mini_type_is_reference (fsig->params [0]));
6455 ins->type = STACK_OBJ;
6460 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6461 i2f->dreg = mono_alloc_freg (cfg);
6462 i2f->sreg1 = ins->dreg;
6463 i2f->type = STACK_R8;
6464 if (i2f_opcode == OP_MOVE_I4_TO_F)
6465 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6466 MONO_ADD_INS (cfg->cbb, i2f);
6471 if (cfg->gen_write_barriers && is_ref)
6472 emit_write_barrier (cfg, args [0], args [1]);
6474 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6475 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6476 guint32 opcode, f2i_opcode, i2f_opcode;
6477 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6478 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6480 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6481 fsig->params [1]->type == MONO_TYPE_R4) {
6482 opcode = OP_ATOMIC_CAS_I4;
6483 f2i_opcode = OP_MOVE_F_TO_I4;
6484 i2f_opcode = OP_MOVE_I4_TO_F;
6485 cfg->has_atomic_cas_i4 = TRUE;
6487 #if SIZEOF_REGISTER == 8
6489 fsig->params [1]->type == MONO_TYPE_I8 ||
6490 fsig->params [1]->type == MONO_TYPE_R8 ||
6491 fsig->params [1]->type == MONO_TYPE_I) {
6492 opcode = OP_ATOMIC_CAS_I8;
6493 f2i_opcode = OP_MOVE_F_TO_I8;
6494 i2f_opcode = OP_MOVE_I8_TO_F;
6497 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6498 opcode = OP_ATOMIC_CAS_I4;
6499 cfg->has_atomic_cas_i4 = TRUE;
6505 if (!mono_arch_opcode_supported (opcode))
6509 /* TODO: Decompose these opcodes instead of bailing here. */
6510 if (COMPILE_SOFT_FLOAT (cfg))
6513 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6514 f2i_new->dreg = mono_alloc_ireg (cfg);
6515 f2i_new->sreg1 = args [1]->dreg;
6516 if (f2i_opcode == OP_MOVE_F_TO_I4)
6517 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6518 MONO_ADD_INS (cfg->cbb, f2i_new);
6520 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6521 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6522 f2i_cmp->sreg1 = args [2]->dreg;
6523 if (f2i_opcode == OP_MOVE_F_TO_I4)
6524 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6525 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6528 MONO_INST_NEW (cfg, ins, opcode);
6529 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6530 ins->sreg1 = args [0]->dreg;
6531 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6532 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6533 MONO_ADD_INS (cfg->cbb, ins);
6535 switch (fsig->params [1]->type) {
6537 ins->type = STACK_I4;
6540 ins->type = STACK_I8;
6543 #if SIZEOF_REGISTER == 8
6544 ins->type = STACK_I8;
6546 ins->type = STACK_I4;
6550 ins->type = cfg->r4_stack_type;
6553 ins->type = STACK_R8;
6556 g_assert (mini_type_is_reference (fsig->params [1]));
6557 ins->type = STACK_OBJ;
6562 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6563 i2f->dreg = mono_alloc_freg (cfg);
6564 i2f->sreg1 = ins->dreg;
6565 i2f->type = STACK_R8;
6566 if (i2f_opcode == OP_MOVE_I4_TO_F)
6567 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6568 MONO_ADD_INS (cfg->cbb, i2f);
6573 if (cfg->gen_write_barriers && is_ref)
6574 emit_write_barrier (cfg, args [0], args [1]);
6576 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6577 fsig->params [1]->type == MONO_TYPE_I4) {
6578 MonoInst *cmp, *ceq;
6580 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6583 /* int32 r = CAS (location, value, comparand); */
6584 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6585 ins->dreg = alloc_ireg (cfg);
6586 ins->sreg1 = args [0]->dreg;
6587 ins->sreg2 = args [1]->dreg;
6588 ins->sreg3 = args [2]->dreg;
6589 ins->type = STACK_I4;
6590 MONO_ADD_INS (cfg->cbb, ins);
6592 /* bool result = r == comparand; */
6593 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6594 cmp->sreg1 = ins->dreg;
6595 cmp->sreg2 = args [2]->dreg;
6596 cmp->type = STACK_I4;
6597 MONO_ADD_INS (cfg->cbb, cmp);
6599 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6600 ceq->dreg = alloc_ireg (cfg);
6601 ceq->type = STACK_I4;
6602 MONO_ADD_INS (cfg->cbb, ceq);
6604 /* *success = result; */
6605 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6607 cfg->has_atomic_cas_i4 = TRUE;
6609 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6610 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6614 } else if (cmethod->klass->image == mono_defaults.corlib &&
6615 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6616 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6619 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6621 MonoType *t = fsig->params [0];
6623 gboolean is_float = t->type == MONO_TYPE_R4 || t->type == MONO_TYPE_R8;
6625 g_assert (t->byref);
6626 /* t is a byref type, so the reference check is more complicated */
6627 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6628 if (t->type == MONO_TYPE_I1)
6629 opcode = OP_ATOMIC_LOAD_I1;
6630 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6631 opcode = OP_ATOMIC_LOAD_U1;
6632 else if (t->type == MONO_TYPE_I2)
6633 opcode = OP_ATOMIC_LOAD_I2;
6634 else if (t->type == MONO_TYPE_U2)
6635 opcode = OP_ATOMIC_LOAD_U2;
6636 else if (t->type == MONO_TYPE_I4)
6637 opcode = OP_ATOMIC_LOAD_I4;
6638 else if (t->type == MONO_TYPE_U4)
6639 opcode = OP_ATOMIC_LOAD_U4;
6640 else if (t->type == MONO_TYPE_R4)
6641 opcode = OP_ATOMIC_LOAD_R4;
6642 else if (t->type == MONO_TYPE_R8)
6643 opcode = OP_ATOMIC_LOAD_R8;
6644 #if SIZEOF_REGISTER == 8
6645 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6646 opcode = OP_ATOMIC_LOAD_I8;
6647 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6648 opcode = OP_ATOMIC_LOAD_U8;
6650 else if (t->type == MONO_TYPE_I)
6651 opcode = OP_ATOMIC_LOAD_I4;
6652 else if (is_ref || t->type == MONO_TYPE_U)
6653 opcode = OP_ATOMIC_LOAD_U4;
6657 if (!mono_arch_opcode_supported (opcode))
6660 MONO_INST_NEW (cfg, ins, opcode);
6661 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6662 ins->sreg1 = args [0]->dreg;
6663 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6664 MONO_ADD_INS (cfg->cbb, ins);
6667 case MONO_TYPE_BOOLEAN:
6674 ins->type = STACK_I4;
6678 ins->type = STACK_I8;
6682 #if SIZEOF_REGISTER == 8
6683 ins->type = STACK_I8;
6685 ins->type = STACK_I4;
6689 ins->type = cfg->r4_stack_type;
6692 ins->type = STACK_R8;
6696 ins->type = STACK_OBJ;
6702 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6704 MonoType *t = fsig->params [0];
6707 g_assert (t->byref);
6708 is_ref = mini_type_is_reference (&mono_class_from_mono_type (t)->byval_arg);
6709 if (t->type == MONO_TYPE_I1)
6710 opcode = OP_ATOMIC_STORE_I1;
6711 else if (t->type == MONO_TYPE_U1 || t->type == MONO_TYPE_BOOLEAN)
6712 opcode = OP_ATOMIC_STORE_U1;
6713 else if (t->type == MONO_TYPE_I2)
6714 opcode = OP_ATOMIC_STORE_I2;
6715 else if (t->type == MONO_TYPE_U2)
6716 opcode = OP_ATOMIC_STORE_U2;
6717 else if (t->type == MONO_TYPE_I4)
6718 opcode = OP_ATOMIC_STORE_I4;
6719 else if (t->type == MONO_TYPE_U4)
6720 opcode = OP_ATOMIC_STORE_U4;
6721 else if (t->type == MONO_TYPE_R4)
6722 opcode = OP_ATOMIC_STORE_R4;
6723 else if (t->type == MONO_TYPE_R8)
6724 opcode = OP_ATOMIC_STORE_R8;
6725 #if SIZEOF_REGISTER == 8
6726 else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_I)
6727 opcode = OP_ATOMIC_STORE_I8;
6728 else if (is_ref || t->type == MONO_TYPE_U8 || t->type == MONO_TYPE_U)
6729 opcode = OP_ATOMIC_STORE_U8;
6731 else if (t->type == MONO_TYPE_I)
6732 opcode = OP_ATOMIC_STORE_I4;
6733 else if (is_ref || t->type == MONO_TYPE_U)
6734 opcode = OP_ATOMIC_STORE_U4;
6738 if (!mono_arch_opcode_supported (opcode))
6741 MONO_INST_NEW (cfg, ins, opcode);
6742 ins->dreg = args [0]->dreg;
6743 ins->sreg1 = args [1]->dreg;
6744 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6745 MONO_ADD_INS (cfg->cbb, ins);
6747 if (cfg->gen_write_barriers && is_ref)
6748 emit_write_barrier (cfg, args [0], args [1]);
6754 } else if (cmethod->klass->image == mono_defaults.corlib &&
6755 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6756 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6757 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6758 if (should_insert_brekpoint (cfg->method)) {
6759 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6761 MONO_INST_NEW (cfg, ins, OP_NOP);
6762 MONO_ADD_INS (cfg->cbb, ins);
6766 } else if (cmethod->klass->image == mono_defaults.corlib &&
6767 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6768 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6769 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6771 EMIT_NEW_ICONST (cfg, ins, 1);
6773 EMIT_NEW_ICONST (cfg, ins, 0);
6776 } else if (cmethod->klass->image == mono_defaults.corlib &&
6777 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6778 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6779 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6780 /* No stack walks are currently available, so implement this as an intrinsic */
6781 MonoInst *assembly_ins;
6783 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6784 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6787 } else if (cmethod->klass->image == mono_defaults.corlib &&
6788 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6789 (strcmp (cmethod->klass->name, "MethodBase") == 0)) {
6790 if (cfg->llvm_only && !strcmp (cmethod->name, "GetCurrentMethod")) {
6791 /* No stack walks are currently available, so implement this as an intrinsic */
6792 MonoInst *method_ins;
6793 MonoMethod *declaring = cfg->method;
6795 /* This returns the declaring generic method */
6796 if (declaring->is_inflated)
6797 declaring = ((MonoMethodInflated*)cfg->method)->declaring;
6798 EMIT_NEW_AOTCONST (cfg, method_ins, MONO_PATCH_INFO_METHODCONST, declaring);
6799 ins = mono_emit_jit_icall (cfg, mono_get_method_object, &method_ins);
6800 cfg->no_inline = TRUE;
6801 if (cfg->method != cfg->current_method)
6802 inline_failure (cfg, "MethodBase:GetCurrentMethod ()");
6805 } else if (cmethod->klass == mono_defaults.math_class) {
6807 * There is general branchless code for Min/Max, but it does not work for
6809 * http://everything2.com/?node_id=1051618
6811 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6812 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6813 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6814 !strcmp (cmethod->klass->name, "Selector")) ||
6815 ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
6816 !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
6817 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6818 !strcmp (cmethod->klass->name, "Selector"))
6820 if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
6821 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6822 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6825 MonoJumpInfoToken *ji;
6828 if (args [0]->opcode == OP_GOT_ENTRY) {
6829 pi = (MonoInst *)args [0]->inst_p1;
6830 g_assert (pi->opcode == OP_PATCH_INFO);
6831 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6832 ji = (MonoJumpInfoToken *)pi->inst_p0;
6834 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6835 ji = (MonoJumpInfoToken *)args [0]->inst_p0;
6838 NULLIFY_INS (args [0]);
6840 s = mono_ldstr_utf8 (ji->image, mono_metadata_token_index (ji->token), &cfg->error);
6841 return_val_if_nok (&cfg->error, NULL);
6843 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6844 ins->dreg = mono_alloc_ireg (cfg);
6847 MONO_ADD_INS (cfg->cbb, ins);
6852 #ifdef MONO_ARCH_SIMD_INTRINSICS
6853 if (cfg->opt & MONO_OPT_SIMD) {
6854 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6860 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6864 if (COMPILE_LLVM (cfg)) {
6865 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6870 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6874 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a managed call to a JIT-internal replacement when one exists.
 * Currently only String.InternalAllocateStr is handled: when allocation
 * profiling is off and shared (AOT-style) code is not requested, the call is
 * replaced with a call to the GC's managed allocator, passing the String
 * vtable plus the original length argument.
 */
6877 inline static MonoInst*
6878 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6879 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6881 if (method->klass == mono_defaults.string_class) {
6882 /* managed string allocation support */
6883 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6884 MonoInst *iargs [2];
6885 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6886 MonoMethod *managed_alloc = NULL;
6888 g_assert (vtable); /* Should not fail since it is System.String */
/* No managed allocator when cross-compiling — presumably falls back elsewhere; TODO confirm against the elided lines. */
6889 #ifndef MONO_CROSS_COMPILE
6890 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* args: [0] = vtable constant, [1] = requested string length (original first arg). */
6894 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6895 iargs [1] = args [0];
6896 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Create a local variable for each argument (including an implicit 'this')
 * of SIG and emit a store of the corresponding stack value into it. Used when
 * setting up the argument vars of an inlined callee.
 */
6903 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6905 MonoInst *store, *temp;
6908 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is 'this' when hasthis: its type comes from the stack, not the signature. */
6909 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6912 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6913 * would be different than the MonoInst's used to represent arguments, and
6914 * the ldelema implementation can't deal with that.
6915 * Solution: When ldelema is used on an inline argument, create a var for
6916 * it, emit ldelema on that var, and emit the saving code below in
6917 * inline_method () if needed.
6919 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6920 cfg->args [i] = temp;
6921 /* This uses cfg->args [i] which is set by the preceding line */
6922 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6923 store->cil_code = sp [0]->cil_code;
/* Debug switches: restrict inlining by callee/caller name via environment variables. */
6928 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6929 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6931 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: return TRUE only when the full name of CALLED_METHOD starts
 * with the prefix given in MONO_INLINE_CALLED_METHOD_NAME_LIMIT. The env var
 * is read once and cached in a static; an empty limit disables the filter
 * (behavior for that case is in the elided lines — TODO confirm).
 */
6933 check_inline_called_method_name_limit (MonoMethod *called_method)
6936 static const char *limit = NULL;
6938 if (limit == NULL) {
6939 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6941 if (limit_string != NULL)
6942 limit = limit_string;
6947 if (limit [0] != '\0') {
6948 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen(limit) chars must match. */
6950 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6951 g_free (called_method_name);
6953 //return (strncmp_result <= 0);
6954 return (strncmp_result == 0);
6961 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Same as check_inline_called_method_name_limit () but filters on the
 * CALLER's full name via MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
6963 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6966 static const char *limit = NULL;
6968 if (limit == NULL) {
6969 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6970 if (limit_string != NULL) {
6971 limit = limit_string;
6977 if (limit [0] != '\0') {
6978 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison against the cached env-var value. */
6980 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6981 g_free (caller_method_name);
6983 //return (strncmp_result <= 0);
6984 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing the register DREG to the zero value appropriate for
 * RTYPE: NULL for pointers/references, 0 for integers, 0.0 for floats, and
 * VZERO for value types. Used e.g. to give an inlined method's return var a
 * defined value when some path does not set it.
 */
6992 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Statics: R4/R8 const instructions reference the value via pointer (inst_p0). */
6994 static double r8_0 = 0.0;
6995 static float r4_0 = 0.0;
6999 rtype = mini_get_underlying_type (rtype);
7003 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
7004 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7005 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
7006 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7007 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
/* With r4fp, R4 stays a distinct single-precision stack type. */
7008 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7009 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7010 ins->type = STACK_R4;
7011 ins->inst_p0 = (void*)&r4_0;
7013 MONO_ADD_INS (cfg->cbb, ins);
7014 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7015 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7016 ins->type = STACK_R8;
7017 ins->inst_p0 = (void*)&r8_0;
7019 MONO_ADD_INS (cfg->cbb, ins);
7020 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7021 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7022 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type vars constrained to valuetypes are zeroed like structs. */
7023 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7024 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
7026 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder initializations
 * that keep the IR valid (every var has a def) without generating real code.
 * Falls back to a real init for types with no dummy opcode.
 */
7031 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
7035 rtype = mini_get_underlying_type (rtype);
7039 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
7040 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
7041 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
7042 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
7043 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
7044 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
7045 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
7046 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
7047 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
7048 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
7049 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
7050 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
7051 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
7052 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real (cheap) initialization instead. */
7054 emit_init_rvar (cfg, dreg, rtype);
7058 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE. With soft-float the value
 * is materialized into a fresh register first and then stored to the local;
 * otherwise the local's dreg is initialized directly (real init when INIT is
 * TRUE, dummy placeholder otherwise).
 */
7060 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
7062 MonoInst *var = cfg->locals [local];
7063 if (COMPILE_SOFT_FLOAT (cfg)) {
7065 int reg = alloc_dreg (cfg, (MonoStackType)var->type);
7066 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local. */
7067 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
7070 emit_init_rvar (cfg, var->dreg, type);
7072 emit_dummy_init_rvar (cfg, var->dreg, type);
7079 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current IR position. Saves the parts of CFG
 * that mono_method_to_ir () will clobber (locals, args, bblock maps, current
 * method/context), compiles the callee between fresh start/end bblocks, then
 * restores CFG state. On success (cost under the threshold, or INLINE_ALWAYS,
 * or AggressiveInlining), the new bblocks are linked/merged into the graph and
 * the callee's return value is loaded; on failure, the new bblocks are
 * discarded by resetting cfg->cbb. Returns the inlining cost (semantics of
 * the return value on abort are in the elided lines — TODO confirm).
 */
7082 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
7083 guchar *ip, guint real_offset, gboolean inline_always)
7086 MonoInst *ins, *rvar = NULL;
7087 MonoMethodHeader *cheader;
7088 MonoBasicBlock *ebblock, *sbblock;
7090 MonoMethod *prev_inlined_method;
7091 MonoInst **prev_locals, **prev_args;
7092 MonoType **prev_arg_types;
7093 guint prev_real_offset;
7094 GHashTable *prev_cbb_hash;
7095 MonoBasicBlock **prev_cil_offset_to_bb;
7096 MonoBasicBlock *prev_cbb;
7097 const unsigned char *prev_ip;
7098 unsigned char *prev_cil_start;
7099 guint32 prev_cil_offset_to_bb_len;
7100 MonoMethod *prev_current_method;
7101 MonoGenericContext *prev_generic_context;
7102 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual_ = FALSE;
7104 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional name-based debug filters (see MONO_INLINE_*_METHOD_NAME_LIMIT). */
7106 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
7107 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
7110 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
7111 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
7116 fsig = mono_method_signature (cmethod);
7118 if (cfg->verbose_level > 2)
7119 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct inlineable method only once. */
7121 if (!cmethod->inline_info) {
7122 cfg->stat_inlineable_methods++;
7123 cmethod->inline_info = 1;
7126 /* allocate local variables */
7127 cheader = mono_method_get_header_checked (cmethod, &error);
/* Header load failed: a mandatory inline becomes a compile error; otherwise just drop the error. */
7129 if (inline_always) {
7130 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
7131 mono_error_move (&cfg->error, &error);
7133 mono_error_cleanup (&error);
7138 /*Must verify before creating locals as it can cause the JIT to assert.*/
7139 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
7140 mono_metadata_free_mh (cheader);
7144 /* allocate space to store the return value */
7145 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7146 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the callee; restored below. */
7149 prev_locals = cfg->locals;
7150 cfg->locals = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7151 for (i = 0; i < cheader->num_locals; ++i)
7152 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7154 /* allocate start and end blocks */
7155 /* This is needed so if the inline is aborted, we can clean up */
7156 NEW_BBLOCK (cfg, sbblock);
7157 sbblock->real_offset = real_offset;
7159 NEW_BBLOCK (cfg, ebblock);
7160 ebblock->block_num = cfg->num_bblocks++;
7161 ebblock->real_offset = real_offset;
/* Save every piece of CFG state that mono_method_to_ir () overwrites. */
7163 prev_args = cfg->args;
7164 prev_arg_types = cfg->arg_types;
7165 prev_inlined_method = cfg->inlined_method;
7166 cfg->inlined_method = cmethod;
7167 cfg->ret_var_set = FALSE;
7168 cfg->inline_depth ++;
7169 prev_real_offset = cfg->real_offset;
7170 prev_cbb_hash = cfg->cbb_hash;
7171 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7172 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7173 prev_cil_start = cfg->cil_start;
7175 prev_cbb = cfg->cbb;
7176 prev_current_method = cfg->current_method;
7177 prev_generic_context = cfg->generic_context;
7178 prev_ret_var_set = cfg->ret_var_set;
7179 prev_disable_inline = cfg->disable_inline;
/* A callvirt on an instance method keeps virtual-call semantics (null check etc.). */
7181 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7184 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual_);
7186 ret_var_set = cfg->ret_var_set;
/* Restore the caller's CFG state regardless of the outcome. */
7188 cfg->inlined_method = prev_inlined_method;
7189 cfg->real_offset = prev_real_offset;
7190 cfg->cbb_hash = prev_cbb_hash;
7191 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7192 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7193 cfg->cil_start = prev_cil_start;
7195 cfg->locals = prev_locals;
7196 cfg->args = prev_args;
7197 cfg->arg_types = prev_arg_types;
7198 cfg->current_method = prev_current_method;
7199 cfg->generic_context = prev_generic_context;
7200 cfg->ret_var_set = prev_ret_var_set;
7201 cfg->disable_inline = prev_disable_inline;
7202 cfg->inline_depth --;
/* Accept: cheap enough, forced, or [MethodImpl(AggressiveInlining)]. */
7204 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7205 if (cfg->verbose_level > 2)
7206 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7208 cfg->stat_inlined_methods++;
7210 /* always add some code to avoid block split failures */
7211 MONO_INST_NEW (cfg, ins, OP_NOP);
7212 MONO_ADD_INS (prev_cbb, ins);
7214 prev_cbb->next_bb = sbblock;
7215 link_bblock (cfg, prev_cbb, sbblock);
7218 * Get rid of the begin and end bblocks if possible to aid local
7221 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7223 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7224 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7226 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7227 MonoBasicBlock *prev = ebblock->in_bb [0];
7229 if (prev->next_bb == ebblock) {
7230 mono_merge_basic_blocks (cfg, prev, ebblock);
7232 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7233 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7234 cfg->cbb = prev_cbb;
7237 /* There could be a bblock after 'prev', and making 'prev' the current bb could cause problems */
7242 * It's possible that the rvar is set in some prev bblock, but not in others.
7248 for (i = 0; i < ebblock->in_count; ++i) {
7249 bb = ebblock->in_bb [i];
/* Paths ending in OP_NOT_REACHED (e.g. throw) never set rvar: give it a dummy value. */
7251 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7254 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7264 * If the inlined method contains only a throw, then the ret var is not
7265 * set, so set it to a dummy value.
7268 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7270 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Header ownership passes to cfg; freed when the compile finishes. */
7273 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7276 if (cfg->verbose_level > 2)
7277 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7278 cfg->exception_type = MONO_EXCEPTION_NONE;
7280 /* This gets rid of the newly added bblocks */
7281 cfg->cbb = prev_cbb;
7283 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7288 * Some of these comments may well be out-of-date.
7289 * Design decisions: we do a single pass over the IL code (and we do bblock
7290 * splitting/merging in the few cases when it's required: a back jump to an IL
7291 * address that was not already seen as bblock starting point).
7292 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7293 * Complex operations are decomposed in simpler ones right away. We need to let the
7294 * arch-specific code peek and poke inside this process somehow (except when the
7295 * optimizations can take advantage of the full semantic info of coarse opcodes).
7296 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7297 * MonoInst->opcode initially is the IL opcode or some simplification of that
7298 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7299 * opcode with value bigger than OP_LAST.
7300 * At this point the IR can be handed over to an interpreter, a dumb code generator
7301 * or to the optimizing code generator that will translate it to SSA form.
7303 * Profiling directed optimizations.
7304 * We may compile by default with few or no optimizations and instrument the code
7305 * or the user may indicate what methods to optimize the most either in a config file
7306 * or through repeated runs where the compiler applies offline the optimizations to
7307 * each method and then decides if it was worth it.
/*
 * Verification/bounds-check helpers used throughout mono_method_to_ir ().
 * Each macro jumps to the UNVERIFIED (or TYPE_LOAD_ERROR) handler defined in
 * that function when its condition fails.
 */
7310 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7311 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7312 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7313 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7314 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7315 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7316 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7317 #define CHECK_TYPELOAD(klass) if (!(klass) || mono_class_has_failure (klass)) TYPE_LOAD_ERROR ((klass))
7319 /* offset from br.s -> br like opcodes */
7320 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE when the CIL address IP either starts no new bblock or starts
 * BB itself, i.e. IP still belongs to BB. Used for lookahead optimizations
 * that must not cross a basic-block boundary.
 */
7323 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7325 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7327 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the CIL stream [START, END) and create a bblock (via GET_BBLOCK) at
 * every branch target and at the instruction following each branch, so the
 * CIL→IR pass knows all block boundaries up front. Also marks the bblock
 * containing a 'throw' as out-of-line (cold).
 */
7331 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7333 unsigned char *ip = start;
7334 unsigned char *target;
7337 MonoBasicBlock *bblock;
7338 const MonoOpcode *opcode;
7341 cli_addr = ip - start;
7342 i = mono_opcode_value ((const guint8 **)&ip, end);
7345 opcode = &mono_opcodes [i];
/* Advance by the operand size; branches additionally register their targets. */
7346 switch (opcode->argument) {
7347 case MonoInlineNone:
7350 case MonoInlineString:
7351 case MonoInlineType:
7352 case MonoInlineField:
7353 case MonoInlineMethod:
7356 case MonoShortInlineR:
7363 case MonoShortInlineVar:
7364 case MonoShortInlineI:
7367 case MonoShortInlineBrTarget:
/* 2 = opcode byte + 1-byte signed displacement. */
7368 target = start + cli_addr + 2 + (signed char)ip [1];
7369 GET_BBLOCK (cfg, bblock, target);
7372 GET_BBLOCK (cfg, bblock, ip);
7374 case MonoInlineBrTarget:
/* 5 = opcode byte + 4-byte signed displacement. */
7375 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7376 GET_BBLOCK (cfg, bblock, target);
7379 GET_BBLOCK (cfg, bblock, ip);
7381 case MonoInlineSwitch: {
7382 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch instruction. */
7385 cli_addr += 5 + 4 * n;
7386 target = start + cli_addr;
7387 GET_BBLOCK (cfg, bblock, target);
7389 for (j = 0; j < n; ++j) {
7390 target = start + cli_addr + (gint32)read32 (ip);
7391 GET_BBLOCK (cfg, bblock, target);
7401 g_assert_not_reached ();
7404 if (i == CEE_THROW) {
7405 unsigned char *bb_start = ip - 1;
7407 /* Find the start of the bblock containing the throw */
7409 while ((bb_start >= start) && !bblock) {
7410 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: lay them out out-of-line. */
7414 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in method M to a MonoMethod, permitting open constructed
 * types. Wrapper methods store their callees in wrapper data (inflated with
 * CONTEXT when generic); regular methods go through the metadata loader.
 * On failure, ERROR is set.
 */
7424 static inline MonoMethod *
7425 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context, MonoError *error)
7429 mono_error_init (error);
7431 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7432 method = (MonoMethod *)mono_method_get_wrapper_data (m, token);
7434 method = mono_class_inflate_generic_method_checked (method, context, error);
7437 method = mono_get_method_checked (m->klass->image, token, klass, context, error);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open () but rejects methods on open
 * constructed types unless compiling gshared code (such a method cannot be
 * compiled concretely). Errors go into cfg->error when CFG is available,
 * otherwise into a local error that is cleaned up.
 */
7443 static inline MonoMethod *
7444 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7447 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context, cfg ? &cfg->error : &error);
7449 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg)) {
7450 mono_error_set_bad_image (&cfg->error, cfg->method->klass->image, "Method with open type while not compiling gshared");
7454 if (!method && !cfg)
7455 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/*
 * mini_get_class:
 *
 *   Resolve TOKEN in METHOD to a MonoClass, inflating with CONTEXT for
 * generics. Wrapper methods fetch the class from wrapper data; regular
 * methods resolve through the image's typespec table. The class is
 * initialized before returning. Lookup errors are currently swallowed
 * (see FIXMEs).
 */
7460 static inline MonoClass*
7461 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7466 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7467 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
7469 klass = mono_class_inflate_generic_class_checked (klass, context, &error);
7470 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7473 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7474 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7477 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN in METHOD to a MonoMethodSignature (for calli etc.),
 * inflating with CONTEXT when generic. Wrapper methods take the signature
 * from wrapper data; regular methods parse it from metadata. Returns NULL
 * with ERROR set on failure.
 */
7481 static inline MonoMethodSignature*
7482 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context, MonoError *error)
7484 MonoMethodSignature *fsig;
7486 mono_error_init (error);
7487 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7488 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7490 fsig = mono_metadata_parse_signature_checked (method->klass->image, token, error);
7491 return_val_if_nok (error, NULL);
7494 fsig = mono_inflate_generic_signature(fsig, context, error);
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a static) the SecurityManager.ThrowException
 * method used by emit_throw_exception () to raise CoreCLR security
 * exceptions from JITted code.
 */
7500 throw_exception (void)
7502 static MonoMethod *method = NULL;
7505 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7506 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws EX at runtime by calling
 * SecurityManager.ThrowException with the exception object as argument.
 */
7513 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7515 MonoMethod *thrower = throw_exception ();
7518 EMIT_NEW_PCONST (cfg, args [0], ex);
7519 mono_emit_method_call (cfg, thrower, args, NULL);
7523 * Return the original method if a wrapper is specified. We can only access
7524 * the custom attributes from the original method.
7527 get_original_method (MonoMethod *method)
7529 if (method->wrapper_type == MONO_WRAPPER_NONE)
7532 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7533 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7536 /* in other cases we need to find the original method */
7537 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped to its original method) may
 * not access FIELD, emit code that throws the returned security exception.
 */
7541 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7543 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7544 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7546 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped to its original method) may
 * not call CALLEE, emit code that throws the returned security exception.
 */
7550 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7552 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7553 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7555 emit_throw_exception (cfg, ex);
7559 * Check that the IL instructions at ip are the array initialization
7560 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical RuntimeHelpers.InitializeArray sequence
 * (dup; ldtoken <field>; call InitializeArray) following a newarr, and when
 * it matches return a pointer to the static field's RVA data so the array
 * init can be turned into a memcpy. Returns the element size in *OUT_SIZE
 * and the field token in *OUT_FIELD_TOKEN. For AOT the RVA itself is
 * returned and resolved at load time.
 */
7563 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7566 * newarr[System.Int32]
7568 * ldtoken field valuetype ...
7569 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (Field); ip layout: dup(1) ldtoken(1+4) call(1+4). */
7571 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7573 guint32 token = read32 (ip + 7);
7574 guint32 field_token = read32 (ip + 2);
7575 guint32 field_index = field_token & 0xffffff;
7577 const char *data_ptr;
7579 MonoMethod *cmethod;
7580 MonoClass *dummy_class;
7581 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7585 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7589 *out_field_token = field_token;
7591 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Must be exactly corlib's RuntimeHelpers.InitializeArray; bail otherwise. */
7594 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7596 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7597 case MONO_TYPE_BOOLEAN:
7601 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7602 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7603 case MONO_TYPE_CHAR:
/* Don't read past the end of the static field's data. */
7620 if (size > mono_type_size (field->type, &dummy_align))
7623 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7624 if (!image_is_dynamic (method->klass->image)) {
7625 field_index = read32 (ip + 2) & 0xffffff;
7626 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7627 data_ptr = mono_image_rva_map (method->klass->image, rva);
7628 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7629 /* for aot code we do the lookup on load */
7630 if (aot && data_ptr)
7631 return (const char *)GUINT_TO_POINTER (rva);
7633 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7635 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message that includes the
 * full method name and a disassembly of the offending IL at IP (or a note
 * that the body is empty/unparseable). The header is queued for deferred
 * freeing on cfg->headers_to_free.
 */
7643 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7646 char *method_fname = mono_method_full_name (method, TRUE);
7648 MonoMethodHeader *header = mono_method_get_header_checked (method, &error);
7651 method_code = g_strdup_printf ("could not parse method body due to %s", mono_error_get_message (&error));
7652 mono_error_cleanup (&error);
7653 } else if (header->code_size == 0)
7654 method_code = g_strdup ("method body is empty.");
7656 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7657 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code));
7658 g_free (method_fname);
7659 g_free (method_code);
7660 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * emit_stloc_ir:
 *
 *   Emit IR storing the value on top of the stack (*sp) into local N.
 * When the value is the immediately preceding ICONST/I8CONST instruction,
 * the reg-reg move is folded away by retargeting the constant's dreg
 * directly at the local's register.
 */
7664 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7667 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7668 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7669 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7670 /* Optimize reg-reg moves away */
7672 * Can't optimize other opcodes, since sp[0] might point to
7673 * the last ins of a decomposed opcode.
7675 sp [0]->dreg = (cfg)->locals [n]->dreg;
7677 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7682 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Try to eliminate an ldloca by handling the common ldloca+initobj
 * pattern: when the next opcode (in the same bblock) is INITOBJ, the
 * local is initialized in place and the pair is consumed together.
 * SIZE selects the ldloca encoding (short/long) the local index is read with.
 */
7685 static inline unsigned char *
7686 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7696 local = read16 (ip + 2);
/* ldloca immediately followed by initobj in the same basic block? */
7700 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7701 /* From the INITOBJ case */
7702 token = read32 (ip + 2);
7703 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7704 CHECK_TYPELOAD (klass);
7705 type = mini_get_underlying_type (&klass->byval_arg);
/* Initialize the local directly instead of taking its address */
7706 emit_init_local (cfg, local, type, TRUE);
/*
 * emit_llvmonly_virtual_call:
 *
 *   Emit IR for a virtual/interface call to CMETHOD in llvm-only mode,
 * where vtable and IMT slots hold function descriptors (addr + arg pairs)
 * instead of raw method addresses/trampolines. Four strategies are used,
 * in order: plain virtual call, simple interface call through an IMT slot,
 * generic-virtual/variant-interface call with a slow-path fallback, and a
 * fully generic icall-based resolution for the remaining (gsharedvt) cases.
 */
7714 emit_llvmonly_virtual_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used, MonoInst **sp)
7716 MonoInst *icall_args [16];
7717 MonoInst *call_target, *ins, *vtable_ins;
7718 int arg_reg, this_reg, vtable_reg;
7719 gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
7720 gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
7721 gboolean variant_iface = FALSE;
7726 * In llvm-only mode, vtables contain function descriptors instead of
7727 * method addresses/trampolines.
/* Explicit null check on the receiver before any vtable load */
7729 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
/* Interfaces use IMT slots, classes use vtable indexes */
7732 slot = mono_method_get_imt_slot (cmethod);
7734 slot = mono_method_get_vtable_index (cmethod);
7736 this_reg = sp [0]->dreg;
7738 if (is_iface && mono_class_has_variant_generic_params (cmethod->klass))
7739 variant_iface = TRUE;
7741 if (!fsig->generic_param_count && !is_iface && !is_gsharedvt) {
7743 * The simplest case, a normal virtual call.
7745 int slot_reg = alloc_preg (cfg);
7746 int addr_reg = alloc_preg (cfg);
7747 int arg_reg = alloc_preg (cfg);
7748 MonoBasicBlock *non_null_bb;
7750 vtable_reg = alloc_preg (cfg);
7751 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
7752 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7754 /* Load the vtable slot, which contains a function descriptor. */
7755 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7757 NEW_BBLOCK (cfg, non_null_bb);
/* Slot already initialized (non-null) is the likely fast path */
7759 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7760 cfg->cbb->last_ins->flags |= MONO_INST_LIKELY;
7761 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_null_bb);
7764 // FIXME: Make the wrapper use the preserveall cconv
7765 // FIXME: Use one icall per slot for small slot numbers ?
7766 icall_args [0] = vtable_ins;
7767 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7768 /* Make the icall return the vtable slot value to save some code space */
7769 ins = mono_emit_jit_icall (cfg, mono_init_vtable_slot, icall_args);
7770 ins->dreg = slot_reg;
7771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, non_null_bb);
7774 MONO_START_BB (cfg, non_null_bb);
7775 /* Load the address + arg from the vtable slot */
7776 EMIT_NEW_LOAD_MEMBASE (cfg, call_target, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7777 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, arg_reg, slot_reg, SIZEOF_VOID_P);
/* Indirect call passing the descriptor's arg as the extra argument */
7779 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
7782 if (!fsig->generic_param_count && is_iface && !variant_iface && !is_gsharedvt) {
7784 * A simple interface call
7786 * We make a call through an imt slot to obtain the function descriptor we need to call.
7787 * The imt slot contains a function descriptor for a runtime function + arg.
7789 int slot_reg = alloc_preg (cfg);
7790 int addr_reg = alloc_preg (cfg);
7791 int arg_reg = alloc_preg (cfg);
7792 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7794 vtable_reg = alloc_preg (cfg);
7795 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* IMT slots live at negative offsets before the vtable proper */
7796 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7799 * The slot is already initialized when the vtable is created so there is no need
7803 /* Load the imt slot, which contains a function descriptor. */
7804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7806 /* Load the address + arg of the imt thunk from the imt slot */
7807 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7808 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7810 * IMT thunks in llvm-only mode are C functions which take an info argument
7811 * plus the imt method and return the ftndesc to call.
7813 icall_args [0] = thunk_arg_ins;
7814 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7815 cmethod, MONO_RGCTX_INFO_METHOD);
7816 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7818 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7821 if ((fsig->generic_param_count || variant_iface) && !is_gsharedvt) {
7823 * This is similar to the interface case, the vtable slot points to an imt thunk which is
7824 * dynamically extended as more instantiations are discovered.
7825 * This handles generic virtual methods both on classes and interfaces.
7827 int slot_reg = alloc_preg (cfg);
7828 int addr_reg = alloc_preg (cfg);
7829 int arg_reg = alloc_preg (cfg);
7830 int ftndesc_reg = alloc_preg (cfg);
7831 MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
7832 MonoBasicBlock *slowpath_bb, *end_bb;
7834 NEW_BBLOCK (cfg, slowpath_bb);
7835 NEW_BBLOCK (cfg, end_bb);
7837 vtable_reg = alloc_preg (cfg);
7838 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_ins, OP_LOAD_MEMBASE, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface: IMT slot (negative offset); class: vtable slot */
7840 offset = ((gint32)slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
7842 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
7844 /* Load the slot, which contains a function descriptor. */
7845 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
7847 /* These slots are not initialized, so fall back to the slow path until they are initialized */
7848 /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
7849 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
7850 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7853 /* Same as with iface calls */
7854 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
7855 EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
7856 icall_args [0] = thunk_arg_ins;
7857 icall_args [1] = emit_get_rgctx_method (cfg, context_used,
7858 cmethod, MONO_RGCTX_INFO_METHOD);
7859 ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_trampoline, icall_args, thunk_addr_ins, NULL, NULL);
7860 ftndesc_ins->dreg = ftndesc_reg;
7862 * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
7863 * they don't know about yet. Fall back to the slowpath in that case.
7865 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
7866 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
7868 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Slow path: resolve the target via a runtime icall */
7871 MONO_START_BB (cfg, slowpath_bb);
7872 icall_args [0] = vtable_ins;
7873 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7874 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7875 cmethod, MONO_RGCTX_INFO_METHOD);
7877 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_iface_call, icall_args);
7879 ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
7880 ftndesc_ins->dreg = ftndesc_reg;
7881 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
7884 MONO_START_BB (cfg, end_bb);
7885 return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
7889 * Non-optimized cases
7891 icall_args [0] = sp [0];
7892 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
7894 icall_args [2] = emit_get_rgctx_method (cfg, context_used,
7895 cmethod, MONO_RGCTX_INFO_METHOD);
/* Out parameter receiving the extra call argument from the resolver */
7897 arg_reg = alloc_preg (cfg);
7898 MONO_EMIT_NEW_PCONST (cfg, arg_reg, NULL);
7899 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], arg_reg, &mono_defaults.int_class->byval_arg);
/* Only gsharedvt calls should reach this fallback */
7901 g_assert (is_gsharedvt);
7903 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call_gsharedvt, icall_args);
7905 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall_gsharedvt, icall_args);
7908 * Pass the extra argument even if the callee doesn't receive it, most
7909 * calling conventions allow this.
7911 return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
/*
 * is_exception_class:
 *
 *   Return whether KLASS is System.Exception or derives from it, by
 * walking up the parent chain.
 */
7915 is_exception_class (MonoClass *klass)
7918 if (klass == mono_defaults.exception_class)
/* Walk up the inheritance chain */
7920 klass = klass->parent;
7926 * is_jit_optimizer_disabled:
7928 * Determine whether M's assembly has a DebuggableAttribute with the
7929 * IsJITOptimizerDisabled flag set.
7932 is_jit_optimizer_disabled (MonoMethod *m)
7935 MonoAssembly *ass = m->klass->image->assembly;
7936 MonoCustomAttrInfo* attrs;
7939 gboolean val = FALSE;
/* Result is cached per assembly; the inited flag is published after the value */
7942 if (ass->jit_optimizer_disabled_inited)
7943 return ass->jit_optimizer_disabled;
7945 klass = mono_class_try_get_debuggable_attribute_class ();
/* DebuggableAttribute class unavailable: cache FALSE and return */
7949 ass->jit_optimizer_disabled = FALSE;
7950 mono_memory_barrier ();
7951 ass->jit_optimizer_disabled_inited = TRUE;
7955 attrs = mono_custom_attrs_from_assembly_checked (ass, &error);
7956 mono_error_cleanup (&error); /* FIXME don't swallow the error */
/* Scan the assembly's custom attributes for DebuggableAttribute */
7958 for (i = 0; i < attrs->num_attrs; ++i) {
7959 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7961 MonoMethodSignature *sig;
7963 if (!attr->ctor || attr->ctor->klass != klass)
7965 /* Decode the attribute. See reflection.c */
7966 p = (const char*)attr->data;
/* Custom attribute blobs begin with the 0x0001 prolog */
7967 g_assert (read16 (p) == 0x0001);
7970 // FIXME: Support named parameters
7971 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute (bool, bool) ctor shape is decoded here */
7972 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7974 /* Two boolean arguments */
7978 mono_custom_attrs_free (attrs);
/* Publish the value before the inited flag (barrier gives release ordering) */
7981 ass->jit_optimizer_disabled = val;
7982 mono_memory_barrier ();
7983 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return whether a tail call from METHOD to CMETHOD with signature FSIG
 * (made through CALL_OPCODE) can be emitted. Starts from the arch-specific
 * answer and then vetoes cases where the callee could observe the caller's
 * stack or where the call shape is otherwise unsupported.
 */
7989 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7991 gboolean supported_tail_call;
7994 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7996 for (i = 0; i < fsig->param_count; ++i) {
7997 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7998 /* These can point to the current method's stack */
7999 supported_tail_call = FALSE;
8001 if (fsig->hasthis && cmethod->klass->valuetype)
8002 /* this might point to the current method's stack */
8003 supported_tail_call = FALSE;
8004 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
8005 supported_tail_call = FALSE;
/* The LMF must stay valid for the duration of the caller's frame */
8006 if (cfg->method->save_lmf)
8007 supported_tail_call = FALSE;
8008 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
8009 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites are eligible */
8010 if (call_opcode != CEE_CALL)
8011 supported_tail_call = FALSE;
8013 /* Debugging support */
8015 if (supported_tail_call) {
8016 if (!mono_debug_count ())
8017 supported_tail_call = FALSE;
8021 return supported_tail_call;
8027 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * handle_ctor_call:
 *
 *   Emit the call to constructor CMETHOD for a NEWOBJ opcode, with the
 * already-allocated object (and the ctor args) in SP. Chooses between
 * inlining, an intrinsic, gsharedvt/indirect calls and a plain call,
 * setting up the vtable/mrgctx hidden argument where generic sharing
 * requires it. INLINE_COSTS is updated when the ctor is inlined.
 */
8030 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
8031 MonoInst **sp, guint8 *ip, int *inline_costs)
8033 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared valuetype ctors need a hidden vtable/mrgctx argument */
8035 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8036 mono_method_is_generic_sharable (cmethod, TRUE)) {
8037 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8038 mono_class_vtable (cfg->domain, cmethod->klass);
8039 CHECK_TYPELOAD (cmethod->klass);
8041 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8042 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8045 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8046 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8048 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8050 CHECK_TYPELOAD (cmethod->klass);
8051 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8056 /* Avoid virtual calls to ctors if possible */
8057 if (mono_class_is_marshalbyref (cmethod->klass))
8058 callvirt_this_arg = sp [0];
/* 1) Try an intrinsic implementation of the ctor */
8060 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8061 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
8062 CHECK_CFG_EXCEPTION;
/* 2) Try inlining (not for exception-derived classes) */
8063 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8064 mono_method_check_inlining (cfg, cmethod) &&
8065 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
8068 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
8069 cfg->real_offset += 5;
/* 5 is the cost of the NEWOBJ opcode itself */
8071 *inline_costs += costs - 5;
8073 INLINE_FAILURE ("inline failure");
8074 // FIXME-VT: Clean this up
8075 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
8076 GSHAREDVT_FAILURE(*ip);
8077 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt signature: call through the gsharedvt-out trampoline */
8079 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8082 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8084 if (cfg->llvm_only) {
8085 // FIXME: Avoid initializing vtable_arg
8086 emit_llvmonly_calli (cfg, fsig, sp, addr);
8088 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) Context-dependent target: indirect call through an rgctx-fetched address */
8090 } else if (context_used &&
8091 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8092 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
8093 MonoInst *cmethod_addr;
8095 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
8097 if (cfg->llvm_only) {
8098 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, cmethod,
8099 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8100 emit_llvmonly_calli (cfg, fsig, sp, addr);
8102 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8103 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8105 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) Fallback: ordinary direct call */
8108 INLINE_FAILURE ("ctor call");
8109 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
8110 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *
 *   Emit IR returning VAL from the current method. Valuetype returns that
 * need STOBJ semantics are stored either into the return variable or
 * through the hidden vret_addr pointer; the soft-float fallback converts
 * an R4 value before handing it to the arch-specific setret.
 */
8117 emit_setret (MonoCompile *cfg, MonoInst *val)
8119 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
8122 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No hidden return-address argument: store into the return variable */
8125 if (!cfg->vret_addr) {
8126 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Store the valuetype through the caller-supplied return address */
8128 EMIT_NEW_RETLOADA (cfg, ret_addr);
8130 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
8131 ins->klass = mono_class_from_mono_type (ret_type);
8134 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: convert the R4 value via an icall before setret */
8135 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8136 MonoInst *iargs [1];
8140 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8141 mono_arch_emit_setret (cfg, cfg->method, conv);
8143 mono_arch_emit_setret (cfg, cfg->method, val);
8146 mono_arch_emit_setret (cfg, cfg->method, val);
8152 * mono_method_to_ir:
8154 * Translate the .net IL into linear IR.
8157 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
8158 MonoInst *return_var, MonoInst **inline_args,
8159 guint inline_offset, gboolean is_virtual_call)
8162 MonoInst *ins, **sp, **stack_start;
8163 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
8164 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
8165 MonoMethod *cmethod, *method_definition;
8166 MonoInst **arg_array;
8167 MonoMethodHeader *header;
8169 guint32 token, ins_flag;
8171 MonoClass *constrained_class = NULL;
8172 unsigned char *ip, *end, *target, *err_pos;
8173 MonoMethodSignature *sig;
8174 MonoGenericContext *generic_context = NULL;
8175 MonoGenericContainer *generic_container = NULL;
8176 MonoType **param_types;
8177 int i, n, start_new_bblock, dreg;
8178 int num_calls = 0, inline_costs = 0;
8179 int breakpoint_id = 0;
8181 GSList *class_inits = NULL;
8182 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
8184 gboolean init_locals, seq_points, skip_dead_blocks;
8185 gboolean sym_seq_points = FALSE;
8186 MonoDebugMethodInfo *minfo;
8187 MonoBitSet *seq_point_locs = NULL;
8188 MonoBitSet *seq_point_set_locs = NULL;
8190 cfg->disable_inline = is_jit_optimizer_disabled (method);
8192 /* serialization and xdomain stuff may need access to private fields and methods */
8193 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
8194 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
8195 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
8196 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
8197 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
8198 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
8200 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
8201 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
8202 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
8203 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
8204 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
8206 image = method->klass->image;
8207 header = mono_method_get_header_checked (method, &cfg->error);
8209 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
8210 goto exception_exit;
8212 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
8215 generic_container = mono_method_get_generic_container (method);
8216 sig = mono_method_signature (method);
8217 num_args = sig->hasthis + sig->param_count;
8218 ip = (unsigned char*)header->code;
8219 cfg->cil_start = ip;
8220 end = ip + header->code_size;
8221 cfg->stat_cil_code_size += header->code_size;
8223 seq_points = cfg->gen_seq_points && cfg->method == method;
8225 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
8226 /* We could hit a seq point before attaching to the JIT (#8338) */
8230 if (cfg->gen_sdb_seq_points && cfg->method == method) {
8231 minfo = mono_debug_lookup_method (method);
8233 MonoSymSeqPoint *sps;
8234 int i, n_il_offsets;
8236 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
8237 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8238 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8239 sym_seq_points = TRUE;
8240 for (i = 0; i < n_il_offsets; ++i) {
8241 if (sps [i].il_offset < header->code_size)
8242 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
8245 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
8246 /* Methods without line number info like auto-generated property accessors */
8247 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8248 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
8249 sym_seq_points = TRUE;
8254 * Methods without init_locals set could cause asserts in various passes
8255 * (#497220). To work around this, we emit dummy initialization opcodes
8256 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
8257 * on some platforms.
8259 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
8260 init_locals = header->init_locals;
8264 method_definition = method;
8265 while (method_definition->is_inflated) {
8266 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
8267 method_definition = imethod->declaring;
8270 /* SkipVerification is not allowed if core-clr is enabled */
8271 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
8273 dont_verify_stloc = TRUE;
8276 if (sig->is_inflated)
8277 generic_context = mono_method_get_context (method);
8278 else if (generic_container)
8279 generic_context = &generic_container->context;
8280 cfg->generic_context = generic_context;
8283 g_assert (!sig->has_type_parameters);
8285 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
8286 g_assert (method->is_inflated);
8287 g_assert (mono_method_get_context (method)->method_inst);
8289 if (method->is_inflated && mono_method_get_context (method)->method_inst)
8290 g_assert (sig->generic_param_count);
8292 if (cfg->method == method) {
8293 cfg->real_offset = 0;
8295 cfg->real_offset = inline_offset;
8298 cfg->cil_offset_to_bb = (MonoBasicBlock **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
8299 cfg->cil_offset_to_bb_len = header->code_size;
8301 cfg->current_method = method;
8303 if (cfg->verbose_level > 2)
8304 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
8306 param_types = (MonoType **)mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
8308 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
8309 for (n = 0; n < sig->param_count; ++n)
8310 param_types [n + sig->hasthis] = sig->params [n];
8311 cfg->arg_types = param_types;
8313 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
8314 if (cfg->method == method) {
8316 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
8317 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
8320 NEW_BBLOCK (cfg, start_bblock);
8321 cfg->bb_entry = start_bblock;
8322 start_bblock->cil_code = NULL;
8323 start_bblock->cil_length = 0;
8326 NEW_BBLOCK (cfg, end_bblock);
8327 cfg->bb_exit = end_bblock;
8328 end_bblock->cil_code = NULL;
8329 end_bblock->cil_length = 0;
8330 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8331 g_assert (cfg->num_bblocks == 2);
8333 arg_array = cfg->args;
8335 if (header->num_clauses) {
8336 cfg->spvars = g_hash_table_new (NULL, NULL);
8337 cfg->exvars = g_hash_table_new (NULL, NULL);
8339 /* handle exception clauses */
8340 for (i = 0; i < header->num_clauses; ++i) {
8341 MonoBasicBlock *try_bb;
8342 MonoExceptionClause *clause = &header->clauses [i];
8343 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
8345 try_bb->real_offset = clause->try_offset;
8346 try_bb->try_start = TRUE;
8347 try_bb->region = ((i + 1) << 8) | clause->flags;
8348 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
8349 tblock->real_offset = clause->handler_offset;
8350 tblock->flags |= BB_EXCEPTION_HANDLER;
8353 * Linking the try block with the EH block hinders inlining as we won't be able to
8354 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8356 if (COMPILE_LLVM (cfg))
8357 link_bblock (cfg, try_bb, tblock);
8359 if (*(ip + clause->handler_offset) == CEE_POP)
8360 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8362 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8363 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8364 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8365 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8366 MONO_ADD_INS (tblock, ins);
8368 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8369 /* finally clauses already have a seq point */
8370 /* seq points for filter clauses are emitted below */
8371 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8372 MONO_ADD_INS (tblock, ins);
8375 /* todo: is a fault block unsafe to optimize? */
8376 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8377 tblock->flags |= BB_EXCEPTION_UNSAFE;
8380 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8382 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8384 /* catch and filter blocks get the exception object on the stack */
8385 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8386 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8388 /* mostly like handle_stack_args (), but just sets the input args */
8389 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8390 tblock->in_scount = 1;
8391 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8392 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8396 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8397 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8398 if (!cfg->compile_llvm) {
8399 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8400 ins->dreg = tblock->in_stack [0]->dreg;
8401 MONO_ADD_INS (tblock, ins);
8404 MonoInst *dummy_use;
8407 * Add a dummy use for the exvar so its liveness info will be
8410 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8413 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8414 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8415 MONO_ADD_INS (tblock, ins);
8418 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8419 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8420 tblock->flags |= BB_EXCEPTION_HANDLER;
8421 tblock->real_offset = clause->data.filter_offset;
8422 tblock->in_scount = 1;
8423 tblock->in_stack = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8424 /* The filter block shares the exvar with the handler block */
8425 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8426 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8427 MONO_ADD_INS (tblock, ins);
8431 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8432 clause->data.catch_class &&
8434 mono_class_check_context_used (clause->data.catch_class)) {
8436 * In shared generic code with catch
8437 * clauses containing type variables
8438 * the exception handling code has to
8439 * be able to get to the rgctx.
8440 * Therefore we have to make sure that
8441 * the vtable/mrgctx argument (for
8442 * static or generic methods) or the
8443 * "this" argument (for non-static
8444 * methods) are live.
8446 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8447 mini_method_get_context (method)->method_inst ||
8448 method->klass->valuetype) {
8449 mono_get_vtable_var (cfg);
8451 MonoInst *dummy_use;
8453 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8458 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8459 cfg->cbb = start_bblock;
8460 cfg->args = arg_array;
8461 mono_save_args (cfg, sig, inline_args);
8464 /* FIRST CODE BLOCK */
8465 NEW_BBLOCK (cfg, tblock);
8466 tblock->cil_code = ip;
8470 ADD_BBLOCK (cfg, tblock);
8472 if (cfg->method == method) {
8473 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8474 if (breakpoint_id) {
8475 MONO_INST_NEW (cfg, ins, OP_BREAK);
8476 MONO_ADD_INS (cfg->cbb, ins);
8480 /* we use a separate basic block for the initialization code */
8481 NEW_BBLOCK (cfg, init_localsbb);
8482 if (cfg->method == method)
8483 cfg->bb_init = init_localsbb;
8484 init_localsbb->real_offset = cfg->real_offset;
8485 start_bblock->next_bb = init_localsbb;
8486 init_localsbb->next_bb = cfg->cbb;
8487 link_bblock (cfg, start_bblock, init_localsbb);
8488 link_bblock (cfg, init_localsbb, cfg->cbb);
8490 cfg->cbb = init_localsbb;
8492 if (cfg->gsharedvt && cfg->method == method) {
8493 MonoGSharedVtMethodInfo *info;
8494 MonoInst *var, *locals_var;
8497 info = (MonoGSharedVtMethodInfo *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8498 info->method = cfg->method;
8499 info->count_entries = 16;
8500 info->entries = (MonoRuntimeGenericContextInfoTemplate *)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8501 cfg->gsharedvt_info = info;
8503 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8504 /* prevent it from being register allocated */
8505 //var->flags |= MONO_INST_VOLATILE;
8506 cfg->gsharedvt_info_var = var;
8508 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8509 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8511 /* Allocate locals */
8512 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8513 /* prevent it from being register allocated */
8514 //locals_var->flags |= MONO_INST_VOLATILE;
8515 cfg->gsharedvt_locals_var = locals_var;
8517 dreg = alloc_ireg (cfg);
8518 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8520 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8521 ins->dreg = locals_var->dreg;
8523 MONO_ADD_INS (cfg->cbb, ins);
8524 cfg->gsharedvt_locals_var_ins = ins;
8526 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8529 ins->flags |= MONO_INST_INIT;
8533 if (mono_security_core_clr_enabled ()) {
8534 /* check if this is native code, e.g. an icall or a p/invoke */
8535 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8536 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8538 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8539 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8541 /* if this ia a native call then it can only be JITted from platform code */
8542 if ((icall || pinvk) && method->klass && method->klass->image) {
8543 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8544 MonoException *ex = icall ? mono_get_exception_security () :
8545 mono_get_exception_method_access ();
8546 emit_throw_exception (cfg, ex);
8553 CHECK_CFG_EXCEPTION;
8555 if (header->code_size == 0)
8558 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8563 if (cfg->method == method)
8564 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8566 for (n = 0; n < header->num_locals; ++n) {
8567 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8572 /* We force the vtable variable here for all shared methods
8573 for the possibility that they might show up in a stack
8574 trace where their exact instantiation is needed. */
8575 if (cfg->gshared && method == cfg->method) {
8576 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8577 mini_method_get_context (method)->method_inst ||
8578 method->klass->valuetype) {
8579 mono_get_vtable_var (cfg);
8581 /* FIXME: Is there a better way to do this?
8582 We need the variable live for the duration
8583 of the whole method. */
8584 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8588 /* add a check for this != NULL to inlined methods */
8589 if (is_virtual_call) {
8592 NEW_ARGLOAD (cfg, arg_ins, 0);
8593 MONO_ADD_INS (cfg->cbb, arg_ins);
8594 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8597 skip_dead_blocks = !dont_verify;
8598 if (skip_dead_blocks) {
8599 original_bb = bb = mono_basic_block_split (method, &cfg->error, header);
8604 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8605 stack_start = sp = (MonoInst **)mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8608 start_new_bblock = 0;
8610 if (cfg->method == method)
8611 cfg->real_offset = ip - header->code;
8613 cfg->real_offset = inline_offset;
8618 if (start_new_bblock) {
8619 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8620 if (start_new_bblock == 2) {
8621 g_assert (ip == tblock->cil_code);
8623 GET_BBLOCK (cfg, tblock, ip);
8625 cfg->cbb->next_bb = tblock;
8627 start_new_bblock = 0;
8628 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8629 if (cfg->verbose_level > 3)
8630 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8631 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8635 g_slist_free (class_inits);
8638 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8639 link_bblock (cfg, cfg->cbb, tblock);
8640 if (sp != stack_start) {
8641 handle_stack_args (cfg, stack_start, sp - stack_start);
8643 CHECK_UNVERIFIABLE (cfg);
8645 cfg->cbb->next_bb = tblock;
8647 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8648 if (cfg->verbose_level > 3)
8649 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8650 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8653 g_slist_free (class_inits);
8658 if (skip_dead_blocks) {
8659 int ip_offset = ip - header->code;
8661 if (ip_offset == bb->end)
8665 int op_size = mono_opcode_size (ip, end);
8666 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8668 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8670 if (ip_offset + op_size == bb->end) {
8671 MONO_INST_NEW (cfg, ins, OP_NOP);
8672 MONO_ADD_INS (cfg->cbb, ins);
8673 start_new_bblock = 1;
8681 * Sequence points are points where the debugger can place a breakpoint.
8682 * Currently, we generate these automatically at points where the IL
8685 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8687 * Make methods interruptable at the beginning, and at the targets of
8688 * backward branches.
8689 * Also, do this at the start of every bblock in methods with clauses too,
8690 * to be able to handle instructions with imprecise control flow like
8692 * Backward branches are handled at the end of method-to-ir ().
8694 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8695 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8697 /* Avoid sequence points on empty IL like .volatile */
8698 // FIXME: Enable this
8699 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8700 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8701 if ((sp != stack_start) && !sym_seq_point)
8702 ins->flags |= MONO_INST_NONEMPTY_STACK;
8703 MONO_ADD_INS (cfg->cbb, ins);
8706 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8709 cfg->cbb->real_offset = cfg->real_offset;
8711 if ((cfg->method == method) && cfg->coverage_info) {
8712 guint32 cil_offset = ip - header->code;
8713 cfg->coverage_info->data [cil_offset].cil_code = ip;
8715 /* TODO: Use an increment here */
8716 #if defined(TARGET_X86)
8717 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8718 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8720 MONO_ADD_INS (cfg->cbb, ins);
8722 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8723 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8727 if (cfg->verbose_level > 3)
8728 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8732 if (seq_points && !sym_seq_points && sp != stack_start) {
8734 * The C# compiler uses these nops to notify the JIT that it should
8735 * insert seq points.
8737 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8738 MONO_ADD_INS (cfg->cbb, ins);
8740 if (cfg->keep_cil_nops)
8741 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8743 MONO_INST_NEW (cfg, ins, OP_NOP);
8745 MONO_ADD_INS (cfg->cbb, ins);
8748 if (should_insert_brekpoint (cfg->method)) {
8749 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8751 MONO_INST_NEW (cfg, ins, OP_NOP);
8754 MONO_ADD_INS (cfg->cbb, ins);
8760 CHECK_STACK_OVF (1);
8761 n = (*ip)-CEE_LDARG_0;
8763 EMIT_NEW_ARGLOAD (cfg, ins, n);
8771 CHECK_STACK_OVF (1);
8772 n = (*ip)-CEE_LDLOC_0;
8774 EMIT_NEW_LOCLOAD (cfg, ins, n);
8783 n = (*ip)-CEE_STLOC_0;
8786 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8788 emit_stloc_ir (cfg, sp, header, n);
8795 CHECK_STACK_OVF (1);
8798 EMIT_NEW_ARGLOAD (cfg, ins, n);
8804 CHECK_STACK_OVF (1);
8807 NEW_ARGLOADA (cfg, ins, n);
8808 MONO_ADD_INS (cfg->cbb, ins);
8818 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8820 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8825 CHECK_STACK_OVF (1);
8828 EMIT_NEW_LOCLOAD (cfg, ins, n);
8832 case CEE_LDLOCA_S: {
8833 unsigned char *tmp_ip;
8835 CHECK_STACK_OVF (1);
8836 CHECK_LOCAL (ip [1]);
8838 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8844 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8853 CHECK_LOCAL (ip [1]);
8854 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8856 emit_stloc_ir (cfg, sp, header, ip [1]);
8861 CHECK_STACK_OVF (1);
8862 EMIT_NEW_PCONST (cfg, ins, NULL);
8863 ins->type = STACK_OBJ;
8868 CHECK_STACK_OVF (1);
8869 EMIT_NEW_ICONST (cfg, ins, -1);
8882 CHECK_STACK_OVF (1);
8883 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8889 CHECK_STACK_OVF (1);
8891 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8897 CHECK_STACK_OVF (1);
8898 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8904 CHECK_STACK_OVF (1);
8905 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8906 ins->type = STACK_I8;
8907 ins->dreg = alloc_dreg (cfg, STACK_I8);
8909 ins->inst_l = (gint64)read64 (ip);
8910 MONO_ADD_INS (cfg->cbb, ins);
8916 gboolean use_aotconst = FALSE;
8918 #ifdef TARGET_POWERPC
8919 /* FIXME: Clean this up */
8920 if (cfg->compile_aot)
8921 use_aotconst = TRUE;
8924 /* FIXME: we should really allocate this only late in the compilation process */
8925 f = (float *)mono_domain_alloc (cfg->domain, sizeof (float));
8927 CHECK_STACK_OVF (1);
8933 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8935 dreg = alloc_freg (cfg);
8936 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8937 ins->type = cfg->r4_stack_type;
8939 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8940 ins->type = cfg->r4_stack_type;
8941 ins->dreg = alloc_dreg (cfg, STACK_R8);
8943 MONO_ADD_INS (cfg->cbb, ins);
8953 gboolean use_aotconst = FALSE;
8955 #ifdef TARGET_POWERPC
8956 /* FIXME: Clean this up */
8957 if (cfg->compile_aot)
8958 use_aotconst = TRUE;
8961 /* FIXME: we should really allocate this only late in the compilation process */
8962 d = (double *)mono_domain_alloc (cfg->domain, sizeof (double));
8964 CHECK_STACK_OVF (1);
8970 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8972 dreg = alloc_freg (cfg);
8973 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8974 ins->type = STACK_R8;
8976 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8977 ins->type = STACK_R8;
8978 ins->dreg = alloc_dreg (cfg, STACK_R8);
8980 MONO_ADD_INS (cfg->cbb, ins);
8989 MonoInst *temp, *store;
8991 CHECK_STACK_OVF (1);
8995 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8996 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8998 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
9001 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
9014 if (sp [0]->type == STACK_R8)
9015 /* we need to pop the value from the x86 FP stack */
9016 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
9021 MonoMethodSignature *fsig;
9024 INLINE_FAILURE ("jmp");
9025 GSHAREDVT_FAILURE (*ip);
9028 if (stack_start != sp)
9030 token = read32 (ip + 1);
9031 /* FIXME: check the signature matches */
9032 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9035 if (cfg->gshared && mono_method_check_context_used (cmethod))
9036 GENERIC_SHARING_FAILURE (CEE_JMP);
9038 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9040 fsig = mono_method_signature (cmethod);
9041 n = fsig->param_count + fsig->hasthis;
9042 if (cfg->llvm_only) {
9045 args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9046 for (i = 0; i < n; ++i)
9047 EMIT_NEW_ARGLOAD (cfg, args [i], i);
9048 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
9050 * The code in mono-basic-block.c treats the rest of the code as dead, but we
9051 * have to emit a normal return since llvm expects it.
9054 emit_setret (cfg, ins);
9055 MONO_INST_NEW (cfg, ins, OP_BR);
9056 ins->inst_target_bb = end_bblock;
9057 MONO_ADD_INS (cfg->cbb, ins);
9058 link_bblock (cfg, cfg->cbb, end_bblock);
9061 } else if (cfg->backend->have_op_tail_call) {
9062 /* Handle tail calls similarly to calls */
9065 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
9066 call->method = cmethod;
9067 call->tail_call = TRUE;
9068 call->signature = mono_method_signature (cmethod);
9069 call->args = (MonoInst **)mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
9070 call->inst.inst_p0 = cmethod;
9071 for (i = 0; i < n; ++i)
9072 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
9074 if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
9075 call->vret_var = cfg->vret_addr;
9077 mono_arch_emit_call (cfg, call);
9078 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
9079 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
9081 for (i = 0; i < num_args; ++i)
9082 /* Prevent arguments from being optimized away */
9083 arg_array [i]->flags |= MONO_INST_VOLATILE;
9085 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9086 ins = (MonoInst*)call;
9087 ins->inst_p0 = cmethod;
9088 MONO_ADD_INS (cfg->cbb, ins);
9092 start_new_bblock = 1;
9097 MonoMethodSignature *fsig;
9100 token = read32 (ip + 1);
9104 //GSHAREDVT_FAILURE (*ip);
9109 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
9112 if (method->dynamic && fsig->pinvoke) {
9116 * This is a call through a function pointer using a pinvoke
9117 * signature. Have to create a wrapper and call that instead.
9118 * FIXME: This is very slow, need to create a wrapper at JIT time
9119 * instead based on the signature.
9121 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
9122 EMIT_NEW_PCONST (cfg, args [1], fsig);
9124 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
9127 n = fsig->param_count + fsig->hasthis;
9131 //g_assert (!virtual_ || fsig->hasthis);
9135 inline_costs += 10 * num_calls++;
9138 * Making generic calls out of gsharedvt methods.
9139 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9140 * patching gshared method addresses into a gsharedvt method.
9142 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
9144 * We pass the address to the gsharedvt trampoline in the rgctx reg
9146 MonoInst *callee = addr;
9148 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9150 GSHAREDVT_FAILURE (*ip);
9154 GSHAREDVT_FAILURE (*ip);
9156 addr = emit_get_rgctx_sig (cfg, context_used,
9157 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9158 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9162 /* Prevent inlining of methods with indirect calls */
9163 INLINE_FAILURE ("indirect call");
9165 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9166 MonoJumpInfoType info_type;
9170 * Instead of emitting an indirect call, emit a direct call
9171 * with the contents of the aotconst as the patch info.
9173 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9174 info_type = (MonoJumpInfoType)addr->inst_c1;
9175 info_data = addr->inst_p0;
9177 info_type = (MonoJumpInfoType)addr->inst_right->inst_c1;
9178 info_data = addr->inst_right->inst_left;
9181 if (info_type == MONO_PATCH_INFO_ICALL_ADDR) {
9182 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR_CALL, info_data, fsig, sp);
9185 } else if (info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9186 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9191 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9195 /* End of call, INS should contain the result of the call, if any */
9197 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9199 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9202 CHECK_CFG_EXCEPTION;
9206 constrained_class = NULL;
9210 case CEE_CALLVIRT: {
9211 MonoInst *addr = NULL;
9212 MonoMethodSignature *fsig = NULL;
9214 int virtual_ = *ip == CEE_CALLVIRT;
9215 gboolean pass_imt_from_rgctx = FALSE;
9216 MonoInst *imt_arg = NULL;
9217 MonoInst *keep_this_alive = NULL;
9218 gboolean pass_vtable = FALSE;
9219 gboolean pass_mrgctx = FALSE;
9220 MonoInst *vtable_arg = NULL;
9221 gboolean check_this = FALSE;
9222 gboolean supported_tail_call = FALSE;
9223 gboolean tail_call = FALSE;
9224 gboolean need_seq_point = FALSE;
9225 guint32 call_opcode = *ip;
9226 gboolean emit_widen = TRUE;
9227 gboolean push_res = TRUE;
9228 gboolean skip_ret = FALSE;
9229 gboolean delegate_invoke = FALSE;
9230 gboolean direct_icall = FALSE;
9231 gboolean constrained_partial_call = FALSE;
9232 MonoMethod *cil_method;
9235 token = read32 (ip + 1);
9239 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9242 cil_method = cmethod;
9244 if (constrained_class) {
9245 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9246 if (!mini_is_gsharedvt_klass (constrained_class)) {
9247 g_assert (!cmethod->klass->valuetype);
9248 if (!mini_type_is_reference (&constrained_class->byval_arg))
9249 constrained_partial_call = TRUE;
9253 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9254 if (cfg->verbose_level > 2)
9255 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9256 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
9257 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
9259 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
9263 if (cfg->verbose_level > 2)
9264 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
9266 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
9268 * This is needed since get_method_constrained can't find
9269 * the method in klass representing a type var.
9270 * The type var is guaranteed to be a reference type in this
9273 if (!mini_is_gsharedvt_klass (constrained_class))
9274 g_assert (!cmethod->klass->valuetype);
9276 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
9282 if (!dont_verify && !cfg->skip_visibility) {
9283 MonoMethod *target_method = cil_method;
9284 if (method->is_inflated) {
9285 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context), &cfg->error);
9288 if (!mono_method_can_access_method (method_definition, target_method) &&
9289 !mono_method_can_access_method (method, cil_method))
9290 emit_method_access_failure (cfg, method, cil_method);
9293 if (mono_security_core_clr_enabled ())
9294 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
9296 if (!virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
9297 /* MS.NET seems to silently convert this to a callvirt */
9302 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
9303 * converts to a callvirt.
9305 * tests/bug-515884.il is an example of this behavior
9307 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
9308 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
9309 if (!virtual_ && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
9313 if (!cmethod->klass->inited)
9314 if (!mono_class_init (cmethod->klass))
9315 TYPE_LOAD_ERROR (cmethod->klass);
9317 fsig = mono_method_signature (cmethod);
9320 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
9321 mini_class_is_system_array (cmethod->klass)) {
9322 array_rank = cmethod->klass->rank;
9323 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
9324 direct_icall = TRUE;
9325 } else if (fsig->pinvoke) {
9326 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9327 fsig = mono_method_signature (wrapper);
9328 } else if (constrained_class) {
9330 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
9334 if (cfg->llvm_only && !cfg->method->wrapper_type && (!cmethod || cmethod->is_inflated))
9335 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
9337 /* See code below */
9338 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9339 MonoBasicBlock *tbb;
9341 GET_BBLOCK (cfg, tbb, ip + 5);
9342 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9344 * We want to extend the try block to cover the call, but we can't do it if the
9345 * call is made directly since it's followed by an exception check.
9347 direct_icall = FALSE;
9351 mono_save_token_info (cfg, image, token, cil_method);
9353 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
9354 need_seq_point = TRUE;
9356 /* Don't support calls made using type arguments for now */
9358 if (cfg->gsharedvt) {
9359 if (mini_is_gsharedvt_signature (fsig))
9360 GSHAREDVT_FAILURE (*ip);
9364 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
9365 g_assert_not_reached ();
9367 n = fsig->param_count + fsig->hasthis;
9369 if (!cfg->gshared && cmethod->klass->generic_container)
9373 g_assert (!mono_method_check_context_used (cmethod));
9377 //g_assert (!virtual_ || fsig->hasthis);
9382 * We have the `constrained.' prefix opcode.
9384 if (constrained_class) {
9385 if (mini_is_gsharedvt_klass (constrained_class)) {
9386 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9387 /* The 'Own method' case below */
9388 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9389 /* 'The type parameter is instantiated as a reference type' case below. */
9391 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9392 CHECK_CFG_EXCEPTION;
9398 if (constrained_partial_call) {
9399 gboolean need_box = TRUE;
9402 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9403 * called method is not known at compile time either. The called method could end up being
9404 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9405 * to box the receiver.
9406 * A simple solution would be to box always and make a normal virtual call, but that would
9407 * be bad performance wise.
9409 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9411 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9416 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9417 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9418 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9419 ins->klass = constrained_class;
9420 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9421 CHECK_CFG_EXCEPTION;
9422 } else if (need_box) {
9424 MonoBasicBlock *is_ref_bb, *end_bb;
9425 MonoInst *nonbox_call;
9428 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9430 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9431 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9433 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9435 NEW_BBLOCK (cfg, is_ref_bb);
9436 NEW_BBLOCK (cfg, end_bb);
9438 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9439 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9440 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9443 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9445 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9448 MONO_START_BB (cfg, is_ref_bb);
9449 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9450 ins->klass = constrained_class;
9451 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9452 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9454 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9456 MONO_START_BB (cfg, end_bb);
9459 nonbox_call->dreg = ins->dreg;
9462 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9463 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9464 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9467 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9469 * The type parameter is instantiated as a valuetype,
9470 * but that type doesn't override the method we're
9471 * calling, so we need to box `this'.
9473 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9474 ins->klass = constrained_class;
9475 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9476 CHECK_CFG_EXCEPTION;
9477 } else if (!constrained_class->valuetype) {
9478 int dreg = alloc_ireg_ref (cfg);
9481 * The type parameter is instantiated as a reference
9482 * type. We have a managed pointer on the stack, so
9483 * we need to dereference it here.
9485 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9486 ins->type = STACK_OBJ;
9489 if (cmethod->klass->valuetype) {
9492 /* Interface method */
9495 mono_class_setup_vtable (constrained_class);
9496 CHECK_TYPELOAD (constrained_class);
9497 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9499 TYPE_LOAD_ERROR (constrained_class);
9500 slot = mono_method_get_vtable_slot (cmethod);
9502 TYPE_LOAD_ERROR (cmethod->klass);
9503 cmethod = constrained_class->vtable [ioffset + slot];
9505 if (cmethod->klass == mono_defaults.enum_class) {
9506 /* Enum implements some interfaces, so treat this as the first case */
9507 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9508 ins->klass = constrained_class;
9509 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9510 CHECK_CFG_EXCEPTION;
9515 constrained_class = NULL;
9518 if (check_call_signature (cfg, fsig, sp))
9521 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9522 delegate_invoke = TRUE;
9524 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9525 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9526 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9534 * If the callee is a shared method, then its static cctor
9535 * might not get called after the call was patched.
9537 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9538 emit_class_init (cfg, cmethod->klass);
9539 CHECK_TYPELOAD (cmethod->klass);
9542 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9545 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9547 context_used = mini_method_check_context_used (cfg, cmethod);
9549 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9550 /* Generic method interface
9551 calls are resolved via a
9552 helper function and don't
9554 if (!cmethod_context || !cmethod_context->method_inst)
9555 pass_imt_from_rgctx = TRUE;
9559 * If a shared method calls another
9560 * shared method then the caller must
9561 * have a generic sharing context
9562 * because the magic trampoline
9563 * requires it. FIXME: We shouldn't
9564 * have to force the vtable/mrgctx
9565 * variable here. Instead there
9566 * should be a flag in the cfg to
9567 * request a generic sharing context.
9570 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9571 mono_get_vtable_var (cfg);
9576 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9578 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9580 CHECK_TYPELOAD (cmethod->klass);
9581 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9586 g_assert (!vtable_arg);
9588 if (!cfg->compile_aot) {
9590 * emit_get_rgctx_method () calls mono_class_vtable () so check
9591 * for type load errors before.
9593 mono_class_setup_vtable (cmethod->klass);
9594 CHECK_TYPELOAD (cmethod->klass);
9597 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9599 /* !marshalbyref is needed to properly handle generic methods + remoting */
9600 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9601 MONO_METHOD_IS_FINAL (cmethod)) &&
9602 !mono_class_is_marshalbyref (cmethod->klass)) {
9609 if (pass_imt_from_rgctx) {
9610 g_assert (!pass_vtable);
9612 imt_arg = emit_get_rgctx_method (cfg, context_used,
9613 cmethod, MONO_RGCTX_INFO_METHOD);
9617 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9619 /* Calling virtual generic methods */
9620 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9621 !(MONO_METHOD_IS_FINAL (cmethod) &&
9622 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9623 fsig->generic_param_count &&
9624 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9626 MonoInst *this_temp, *this_arg_temp, *store;
9627 MonoInst *iargs [4];
9629 g_assert (fsig->is_inflated);
9631 /* Prevent inlining of methods that contain indirect calls */
9632 INLINE_FAILURE ("virtual generic call");
9634 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9635 GSHAREDVT_FAILURE (*ip);
9637 if (cfg->backend->have_generalized_imt_trampoline && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9638 g_assert (!imt_arg);
9640 g_assert (cmethod->is_inflated);
9641 imt_arg = emit_get_rgctx_method (cfg, context_used,
9642 cmethod, MONO_RGCTX_INFO_METHOD);
9643 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9645 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9646 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9647 MONO_ADD_INS (cfg->cbb, store);
9649 /* FIXME: This should be a managed pointer */
9650 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9652 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9653 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9654 cmethod, MONO_RGCTX_INFO_METHOD);
9655 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9656 addr = mono_emit_jit_icall (cfg,
9657 mono_helper_compile_generic_method, iargs);
9659 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9661 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9668 * Implement a workaround for the inherent races involved in locking:
9674 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9675 * try block, the Exit () won't be executed, see:
9676 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9677 * To work around this, we extend such try blocks to include the last x bytes
9678 * of the Monitor.Enter () call.
9680 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9681 MonoBasicBlock *tbb;
9683 GET_BBLOCK (cfg, tbb, ip + 5);
9685 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9686 * from Monitor.Enter like ArgumentNullException.
9688 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9689 /* Mark this bblock as needing to be extended */
9690 tbb->extend_try_block = TRUE;
9694 /* Conversion to a JIT intrinsic */
9695 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9696 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9697 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9705 if ((cfg->opt & MONO_OPT_INLINE) &&
9706 (!virtual_ || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9707 mono_method_check_inlining (cfg, cmethod)) {
9709 gboolean always = FALSE;
9711 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9712 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9713 /* Prevent inlining of methods that call wrappers */
9714 INLINE_FAILURE ("wrapper call");
9715 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9719 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9721 cfg->real_offset += 5;
9723 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9724 /* *sp is already set by inline_method */
9729 inline_costs += costs;
9735 /* Tail recursion elimination */
9736 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9737 gboolean has_vtargs = FALSE;
9740 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9741 INLINE_FAILURE ("tail call");
9743 /* keep it simple */
9744 for (i = fsig->param_count - 1; i >= 0; i--) {
9745 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9750 if (need_seq_point) {
9751 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9752 need_seq_point = FALSE;
9754 for (i = 0; i < n; ++i)
9755 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9756 MONO_INST_NEW (cfg, ins, OP_BR);
9757 MONO_ADD_INS (cfg->cbb, ins);
9758 tblock = start_bblock->out_bb [0];
9759 link_bblock (cfg, cfg->cbb, tblock);
9760 ins->inst_target_bb = tblock;
9761 start_new_bblock = 1;
9763 /* skip the CEE_RET, too */
9764 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9771 inline_costs += 10 * num_calls++;
9774 * Synchronized wrappers.
9775 * It's hard to determine where to replace a method with its synchronized
9776 * wrapper without causing an infinite recursion. The current solution is
9777 * to add the synchronized wrapper in the trampolines, and to
9778 * change the called method to a dummy wrapper, and resolve that wrapper
9779 * to the real method in mono_jit_compile_method ().
9781 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9782 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9783 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9784 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9788 * Making generic calls out of gsharedvt methods.
9789 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9790 * patching gshared method addresses into a gsharedvt method.
9792 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9793 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY) &&
9794 (!(cfg->llvm_only && virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)))) {
9795 MonoRgctxInfoType info_type;
9798 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9799 //GSHAREDVT_FAILURE (*ip);
9800 // disable for possible remoting calls
9801 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9802 GSHAREDVT_FAILURE (*ip);
9803 if (fsig->generic_param_count) {
9804 /* virtual generic call */
9805 g_assert (!imt_arg);
9806 /* Same as the virtual generic case above */
9807 imt_arg = emit_get_rgctx_method (cfg, context_used,
9808 cmethod, MONO_RGCTX_INFO_METHOD);
9809 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9811 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9812 /* This can happen when we call a fully instantiated iface method */
9813 imt_arg = emit_get_rgctx_method (cfg, context_used,
9814 cmethod, MONO_RGCTX_INFO_METHOD);
9819 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9820 keep_this_alive = sp [0];
9822 if (virtual_ && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9823 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9825 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9826 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9828 if (cfg->llvm_only) {
9829 // FIXME: Avoid initializing vtable_arg
9830 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9832 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9837 /* Generic sharing */
9840 * Use this if the callee is gsharedvt sharable too, since
9841 * at runtime we might find an instantiation so the call cannot
9842 * be patched (the 'no_patch' code path in mini-trampolines.c).
9844 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9845 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9846 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9847 (!virtual_ || MONO_METHOD_IS_FINAL (cmethod) ||
9848 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9849 INLINE_FAILURE ("gshared");
9851 g_assert (cfg->gshared && cmethod);
9855 * We are compiling a call to a
9856 * generic method from shared code,
9857 * which means that we have to look up
9858 * the method in the rgctx and do an
9862 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9864 if (cfg->llvm_only) {
9865 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))
9866 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GSHAREDVT_OUT_WRAPPER);
9868 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9869 // FIXME: Avoid initializing imt_arg/vtable_arg
9870 ins = emit_llvmonly_calli (cfg, fsig, sp, addr);
9872 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9873 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9878 /* Direct calls to icalls */
9880 MonoMethod *wrapper;
9883 /* Inline the wrapper */
9884 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9886 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9887 g_assert (costs > 0);
9888 cfg->real_offset += 5;
9890 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9891 /* *sp is already set by inline_method */
9896 inline_costs += costs;
9905 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9906 MonoInst *val = sp [fsig->param_count];
9908 if (val->type == STACK_OBJ) {
9909 MonoInst *iargs [2];
9914 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9917 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9918 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9919 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9920 emit_write_barrier (cfg, addr, val);
9921 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9922 GSHAREDVT_FAILURE (*ip);
9923 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9924 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9926 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9927 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9928 if (!cmethod->klass->element_class->valuetype && !readonly)
9929 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9930 CHECK_TYPELOAD (cmethod->klass);
9933 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9936 g_assert_not_reached ();
9943 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual_ ? sp [0] : NULL);
9947 /* Tail prefix / tail call optimization */
9949 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9950 /* FIXME: runtime generic context pointer for jumps? */
9951 /* FIXME: handle this for generic sharing eventually */
9952 if ((ins_flag & MONO_INST_TAILCALL) &&
9953 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9954 supported_tail_call = TRUE;
9956 if (supported_tail_call) {
9959 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9960 INLINE_FAILURE ("tail call");
9962 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9964 if (cfg->backend->have_op_tail_call) {
9965 /* Handle tail calls similarly to normal calls */
9968 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9970 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9971 call->tail_call = TRUE;
9972 call->method = cmethod;
9973 call->signature = mono_method_signature (cmethod);
9976 * We implement tail calls by storing the actual arguments into the
9977 * argument variables, then emitting a CEE_JMP.
9979 for (i = 0; i < n; ++i) {
9980 /* Prevent argument from being register allocated */
9981 arg_array [i]->flags |= MONO_INST_VOLATILE;
9982 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9984 ins = (MonoInst*)call;
9985 ins->inst_p0 = cmethod;
9986 ins->inst_p1 = arg_array [0];
9987 MONO_ADD_INS (cfg->cbb, ins);
9988 link_bblock (cfg, cfg->cbb, end_bblock);
9989 start_new_bblock = 1;
9991 // FIXME: Eliminate unreachable epilogs
9994 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9995 * only reachable from this call.
9997 GET_BBLOCK (cfg, tblock, ip + 5);
9998 if (tblock == cfg->cbb || tblock->in_count == 0)
10007 * Virtual calls in llvm-only mode.
10009 if (cfg->llvm_only && virtual_ && cmethod && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
10010 ins = emit_llvmonly_virtual_call (cfg, cmethod, fsig, context_used, sp);
10015 INLINE_FAILURE ("call");
10016 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual_ ? sp [0] : NULL,
10017 imt_arg, vtable_arg);
10019 if (tail_call && !cfg->llvm_only) {
10020 link_bblock (cfg, cfg->cbb, end_bblock);
10021 start_new_bblock = 1;
10023 // FIXME: Eliminate unreachable epilogs
10026 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
10027 * only reachable from this call.
10029 GET_BBLOCK (cfg, tblock, ip + 5);
10030 if (tblock == cfg->cbb || tblock->in_count == 0)
10037 /* End of call, INS should contain the result of the call, if any */
10039 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
10042 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
10047 if (keep_this_alive) {
10048 MonoInst *dummy_use;
10050 /* See mono_emit_method_call_full () */
10051 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
10054 if (cfg->llvm_only && cmethod && method_needs_stack_walk (cfg, cmethod)) {
10056 * Clang can convert these calls to tail calls which screw up the stack
10057 * walk. This happens even when the -fno-optimize-sibling-calls
10058 * option is passed to clang.
10059 * Work around this by emitting a dummy call.
10061 mono_emit_jit_icall (cfg, mono_dummy_jit_icall, NULL);
10064 CHECK_CFG_EXCEPTION;
10068 g_assert (*ip == CEE_RET);
10072 constrained_class = NULL;
10073 if (need_seq_point)
10074 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10078 if (cfg->method != method) {
10079 /* return from inlined method */
10081 * If in_count == 0, that means the ret is unreachable due to
10082 * being preceded by a throw. In that case, inline_method () will
10083 * handle setting the return value
10084 * (test case: test_0_inline_throw ()).
10086 if (return_var && cfg->cbb->in_count) {
10087 MonoType *ret_type = mono_method_signature (method)->ret;
10093 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10096 //g_assert (returnvar != -1);
10097 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
10098 cfg->ret_var_set = TRUE;
10101 emit_instrumentation_call (cfg, mono_profiler_method_leave);
10103 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
10104 emit_pop_lmf (cfg);
10107 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
10109 if (seq_points && !sym_seq_points) {
10111 * Place a seq point here too even though the IL stack is not
10112 * empty, so a step over on
10115 * will work correctly.
10117 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
10118 MONO_ADD_INS (cfg->cbb, ins);
10121 g_assert (!return_var);
10125 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
10128 emit_setret (cfg, *sp);
10131 if (sp != stack_start)
10133 MONO_INST_NEW (cfg, ins, OP_BR);
10135 ins->inst_target_bb = end_bblock;
10136 MONO_ADD_INS (cfg->cbb, ins);
10137 link_bblock (cfg, cfg->cbb, end_bblock);
10138 start_new_bblock = 1;
10142 MONO_INST_NEW (cfg, ins, OP_BR);
10144 target = ip + 1 + (signed char)(*ip);
10146 GET_BBLOCK (cfg, tblock, target);
10147 link_bblock (cfg, cfg->cbb, tblock);
10148 ins->inst_target_bb = tblock;
10149 if (sp != stack_start) {
10150 handle_stack_args (cfg, stack_start, sp - stack_start);
10152 CHECK_UNVERIFIABLE (cfg);
10154 MONO_ADD_INS (cfg->cbb, ins);
10155 start_new_bblock = 1;
10156 inline_costs += BRANCH_COST;
10170 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
10172 target = ip + 1 + *(signed char*)ip;
10175 ADD_BINCOND (NULL);
10178 inline_costs += BRANCH_COST;
10182 MONO_INST_NEW (cfg, ins, OP_BR);
10185 target = ip + 4 + (gint32)read32(ip);
10187 GET_BBLOCK (cfg, tblock, target);
10188 link_bblock (cfg, cfg->cbb, tblock);
10189 ins->inst_target_bb = tblock;
10190 if (sp != stack_start) {
10191 handle_stack_args (cfg, stack_start, sp - stack_start);
10193 CHECK_UNVERIFIABLE (cfg);
10196 MONO_ADD_INS (cfg->cbb, ins);
10198 start_new_bblock = 1;
10199 inline_costs += BRANCH_COST;
10201 case CEE_BRFALSE_S:
10206 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
10207 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
10208 guint32 opsize = is_short ? 1 : 4;
10210 CHECK_OPSIZE (opsize);
10212 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
10215 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
10220 GET_BBLOCK (cfg, tblock, target);
10221 link_bblock (cfg, cfg->cbb, tblock);
10222 GET_BBLOCK (cfg, tblock, ip);
10223 link_bblock (cfg, cfg->cbb, tblock);
10225 if (sp != stack_start) {
10226 handle_stack_args (cfg, stack_start, sp - stack_start);
10227 CHECK_UNVERIFIABLE (cfg);
10230 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
10231 cmp->sreg1 = sp [0]->dreg;
10232 type_from_op (cfg, cmp, sp [0], NULL);
10235 #if SIZEOF_REGISTER == 4
10236 if (cmp->opcode == OP_LCOMPARE_IMM) {
10237 /* Convert it to OP_LCOMPARE */
10238 MONO_INST_NEW (cfg, ins, OP_I8CONST);
10239 ins->type = STACK_I8;
10240 ins->dreg = alloc_dreg (cfg, STACK_I8);
10242 MONO_ADD_INS (cfg->cbb, ins);
10243 cmp->opcode = OP_LCOMPARE;
10244 cmp->sreg2 = ins->dreg;
10247 MONO_ADD_INS (cfg->cbb, cmp);
10249 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
10250 type_from_op (cfg, ins, sp [0], NULL);
10251 MONO_ADD_INS (cfg->cbb, ins);
10252 ins->inst_many_bb = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
10253 GET_BBLOCK (cfg, tblock, target);
10254 ins->inst_true_bb = tblock;
10255 GET_BBLOCK (cfg, tblock, ip);
10256 ins->inst_false_bb = tblock;
10257 start_new_bblock = 2;
10260 inline_costs += BRANCH_COST;
10275 MONO_INST_NEW (cfg, ins, *ip);
10277 target = ip + 4 + (gint32)read32(ip);
10280 ADD_BINCOND (NULL);
10283 inline_costs += BRANCH_COST;
10287 MonoBasicBlock **targets;
10288 MonoBasicBlock *default_bblock;
10289 MonoJumpInfoBBTable *table;
10290 int offset_reg = alloc_preg (cfg);
10291 int target_reg = alloc_preg (cfg);
10292 int table_reg = alloc_preg (cfg);
10293 int sum_reg = alloc_preg (cfg);
10294 gboolean use_op_switch;
10298 n = read32 (ip + 1);
10301 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
10305 CHECK_OPSIZE (n * sizeof (guint32));
10306 target = ip + n * sizeof (guint32);
10308 GET_BBLOCK (cfg, default_bblock, target);
10309 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
10311 targets = (MonoBasicBlock **)mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
10312 for (i = 0; i < n; ++i) {
10313 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
10314 targets [i] = tblock;
10315 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
10319 if (sp != stack_start) {
10321 * Link the current bb with the targets as well, so handle_stack_args
10322 * will set their in_stack correctly.
10324 link_bblock (cfg, cfg->cbb, default_bblock);
10325 for (i = 0; i < n; ++i)
10326 link_bblock (cfg, cfg->cbb, targets [i]);
10328 handle_stack_args (cfg, stack_start, sp - stack_start);
10330 CHECK_UNVERIFIABLE (cfg);
10332 /* Undo the links */
10333 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
10334 for (i = 0; i < n; ++i)
10335 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
10338 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10339 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10341 for (i = 0; i < n; ++i)
10342 link_bblock (cfg, cfg->cbb, targets [i]);
10344 table = (MonoJumpInfoBBTable *)mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10345 table->table = targets;
10346 table->table_size = n;
10348 use_op_switch = FALSE;
10350 /* ARM implements SWITCH statements differently */
10351 /* FIXME: Make it use the generic implementation */
10352 if (!cfg->compile_aot)
10353 use_op_switch = TRUE;
10356 if (COMPILE_LLVM (cfg))
10357 use_op_switch = TRUE;
10359 cfg->cbb->has_jump_table = 1;
10361 if (use_op_switch) {
10362 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10363 ins->sreg1 = src1->dreg;
10364 ins->inst_p0 = table;
10365 ins->inst_many_bb = targets;
10366 ins->klass = (MonoClass *)GUINT_TO_POINTER (n);
10367 MONO_ADD_INS (cfg->cbb, ins);
10369 if (sizeof (gpointer) == 8)
10370 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10372 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10374 #if SIZEOF_REGISTER == 8
10375 /* The upper word might not be zero, and we add it to a 64 bit address later */
10376 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10379 if (cfg->compile_aot) {
10380 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10382 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10383 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10384 ins->inst_p0 = table;
10385 ins->dreg = table_reg;
10386 MONO_ADD_INS (cfg->cbb, ins);
10389 /* FIXME: Use load_memindex */
10390 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10391 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10392 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10394 start_new_bblock = 1;
10395 inline_costs += (BRANCH_COST * 2);
10408 case CEE_LDIND_REF:
10415 dreg = alloc_freg (cfg);
10418 dreg = alloc_lreg (cfg);
10420 case CEE_LDIND_REF:
10421 dreg = alloc_ireg_ref (cfg);
10424 dreg = alloc_preg (cfg);
10427 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10428 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10429 if (*ip == CEE_LDIND_R4)
10430 ins->type = cfg->r4_stack_type;
10431 ins->flags |= ins_flag;
10432 MONO_ADD_INS (cfg->cbb, ins);
10434 if (ins_flag & MONO_INST_VOLATILE) {
10435 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10436 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10441 case CEE_STIND_REF:
10452 if (ins_flag & MONO_INST_VOLATILE) {
10453 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10454 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10457 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10458 ins->flags |= ins_flag;
10461 MONO_ADD_INS (cfg->cbb, ins);
10463 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10464 emit_write_barrier (cfg, sp [0], sp [1]);
10473 MONO_INST_NEW (cfg, ins, (*ip));
10475 ins->sreg1 = sp [0]->dreg;
10476 ins->sreg2 = sp [1]->dreg;
10477 type_from_op (cfg, ins, sp [0], sp [1]);
10479 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10481 /* Use the immediate opcodes if possible */
10482 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10483 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10484 if (imm_opcode != -1) {
10485 ins->opcode = imm_opcode;
10486 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10489 NULLIFY_INS (sp [1]);
10493 MONO_ADD_INS ((cfg)->cbb, (ins));
10495 *sp++ = mono_decompose_opcode (cfg, ins);
10512 MONO_INST_NEW (cfg, ins, (*ip));
10514 ins->sreg1 = sp [0]->dreg;
10515 ins->sreg2 = sp [1]->dreg;
10516 type_from_op (cfg, ins, sp [0], sp [1]);
10518 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10519 ins->dreg = alloc_dreg ((cfg), (MonoStackType)(ins)->type);
10521 /* FIXME: Pass opcode to is_inst_imm */
10523 /* Use the immediate opcodes if possible */
10524 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10525 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10526 if (imm_opcode != -1) {
10527 ins->opcode = imm_opcode;
10528 if (sp [1]->opcode == OP_I8CONST) {
10529 #if SIZEOF_REGISTER == 8
10530 ins->inst_imm = sp [1]->inst_l;
10532 ins->inst_ls_word = sp [1]->inst_ls_word;
10533 ins->inst_ms_word = sp [1]->inst_ms_word;
10537 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10540 /* Might be followed by an instruction added by add_widen_op */
10541 if (sp [1]->next == NULL)
10542 NULLIFY_INS (sp [1]);
10545 MONO_ADD_INS ((cfg)->cbb, (ins));
10547 *sp++ = mono_decompose_opcode (cfg, ins);
10560 case CEE_CONV_OVF_I8:
10561 case CEE_CONV_OVF_U8:
10562 case CEE_CONV_R_UN:
10565 /* Special case this earlier so we have long constants in the IR */
10566 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10567 int data = sp [-1]->inst_c0;
10568 sp [-1]->opcode = OP_I8CONST;
10569 sp [-1]->type = STACK_I8;
10570 #if SIZEOF_REGISTER == 8
10571 if ((*ip) == CEE_CONV_U8)
10572 sp [-1]->inst_c0 = (guint32)data;
10574 sp [-1]->inst_c0 = data;
10576 sp [-1]->inst_ls_word = data;
10577 if ((*ip) == CEE_CONV_U8)
10578 sp [-1]->inst_ms_word = 0;
10580 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10582 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10589 case CEE_CONV_OVF_I4:
10590 case CEE_CONV_OVF_I1:
10591 case CEE_CONV_OVF_I2:
10592 case CEE_CONV_OVF_I:
10593 case CEE_CONV_OVF_U:
10596 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10597 ADD_UNOP (CEE_CONV_OVF_I8);
10604 case CEE_CONV_OVF_U1:
10605 case CEE_CONV_OVF_U2:
10606 case CEE_CONV_OVF_U4:
10609 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10610 ADD_UNOP (CEE_CONV_OVF_U8);
10617 case CEE_CONV_OVF_I1_UN:
10618 case CEE_CONV_OVF_I2_UN:
10619 case CEE_CONV_OVF_I4_UN:
10620 case CEE_CONV_OVF_I8_UN:
10621 case CEE_CONV_OVF_U1_UN:
10622 case CEE_CONV_OVF_U2_UN:
10623 case CEE_CONV_OVF_U4_UN:
10624 case CEE_CONV_OVF_U8_UN:
10625 case CEE_CONV_OVF_I_UN:
10626 case CEE_CONV_OVF_U_UN:
10633 CHECK_CFG_EXCEPTION;
10637 case CEE_ADD_OVF_UN:
10639 case CEE_MUL_OVF_UN:
10641 case CEE_SUB_OVF_UN:
10647 GSHAREDVT_FAILURE (*ip);
10650 token = read32 (ip + 1);
10651 klass = mini_get_class (method, token, generic_context);
10652 CHECK_TYPELOAD (klass);
10654 if (generic_class_is_reference_type (cfg, klass)) {
10655 MonoInst *store, *load;
10656 int dreg = alloc_ireg_ref (cfg);
10658 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10659 load->flags |= ins_flag;
10660 MONO_ADD_INS (cfg->cbb, load);
10662 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10663 store->flags |= ins_flag;
10664 MONO_ADD_INS (cfg->cbb, store);
10666 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10667 emit_write_barrier (cfg, sp [0], sp [1]);
10669 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10675 int loc_index = -1;
10681 token = read32 (ip + 1);
10682 klass = mini_get_class (method, token, generic_context);
10683 CHECK_TYPELOAD (klass);
10685 /* Optimize the common ldobj+stloc combination */
10688 loc_index = ip [6];
10695 loc_index = ip [5] - CEE_STLOC_0;
10702 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10703 CHECK_LOCAL (loc_index);
10705 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10706 ins->dreg = cfg->locals [loc_index]->dreg;
10707 ins->flags |= ins_flag;
10710 if (ins_flag & MONO_INST_VOLATILE) {
10711 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10712 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10718 /* Optimize the ldobj+stobj combination */
10719 /* The reference case ends up being a load+store anyway */
10720 /* Skip this if the operation is volatile. */
10721 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10726 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10733 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10734 ins->flags |= ins_flag;
10737 if (ins_flag & MONO_INST_VOLATILE) {
10738 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10739 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10748 CHECK_STACK_OVF (1);
10750 n = read32 (ip + 1);
10752 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10753 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10754 ins->type = STACK_OBJ;
10757 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10758 MonoInst *iargs [1];
10759 char *str = (char *)mono_method_get_wrapper_data (method, n);
10761 if (cfg->compile_aot)
10762 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10764 EMIT_NEW_PCONST (cfg, iargs [0], str);
10765 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10767 if (cfg->opt & MONO_OPT_SHARED) {
10768 MonoInst *iargs [3];
10770 if (cfg->compile_aot) {
10771 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10773 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10774 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10775 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10776 *sp = mono_emit_jit_icall (cfg, ves_icall_mono_ldstr, iargs);
10777 mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10780 if (cfg->cbb->out_of_line) {
10781 MonoInst *iargs [2];
10783 if (image == mono_defaults.corlib) {
10785 * Avoid relocations in AOT and save some space by using a
10786 * version of helper_ldstr specialized to mscorlib.
10788 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10789 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10791 /* Avoid creating the string object */
10792 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10793 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10794 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10798 if (cfg->compile_aot) {
10799 NEW_LDSTRCONST (cfg, ins, image, n);
10801 MONO_ADD_INS (cfg->cbb, ins);
10804 NEW_PCONST (cfg, ins, NULL);
10805 ins->type = STACK_OBJ;
10806 ins->inst_p0 = mono_ldstr_checked (cfg->domain, image, mono_metadata_token_index (n), &cfg->error);
10810 OUT_OF_MEMORY_FAILURE;
10813 MONO_ADD_INS (cfg->cbb, ins);
10822 MonoInst *iargs [2];
10823 MonoMethodSignature *fsig;
10826 MonoInst *vtable_arg = NULL;
10829 token = read32 (ip + 1);
10830 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10833 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10836 mono_save_token_info (cfg, image, token, cmethod);
10838 if (!mono_class_init (cmethod->klass))
10839 TYPE_LOAD_ERROR (cmethod->klass);
10841 context_used = mini_method_check_context_used (cfg, cmethod);
10843 if (mono_security_core_clr_enabled ())
10844 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10846 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10847 emit_class_init (cfg, cmethod->klass);
10848 CHECK_TYPELOAD (cmethod->klass);
10852 if (cfg->gsharedvt) {
10853 if (mini_is_gsharedvt_variable_signature (sig))
10854 GSHAREDVT_FAILURE (*ip);
10858 n = fsig->param_count;
10862 * Generate smaller code for the common newobj <exception> instruction in
10863 * argument checking code.
10865 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10866 is_exception_class (cmethod->klass) && n <= 2 &&
10867 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10868 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10869 MonoInst *iargs [3];
10873 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10876 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10879 iargs [1] = sp [0];
10880 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10883 iargs [1] = sp [0];
10884 iargs [2] = sp [1];
10885 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10888 g_assert_not_reached ();
10896 /* move the args to allow room for 'this' in the first position */
10902 /* check_call_signature () requires sp[0] to be set */
10903 this_ins.type = STACK_OBJ;
10904 sp [0] = &this_ins;
10905 if (check_call_signature (cfg, fsig, sp))
10910 if (mini_class_is_system_array (cmethod->klass)) {
10911 *sp = emit_get_rgctx_method (cfg, context_used,
10912 cmethod, MONO_RGCTX_INFO_METHOD);
10914 /* Avoid varargs in the common case */
10915 if (fsig->param_count == 1)
10916 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10917 else if (fsig->param_count == 2)
10918 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10919 else if (fsig->param_count == 3)
10920 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10921 else if (fsig->param_count == 4)
10922 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10924 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10925 } else if (cmethod->string_ctor) {
10926 g_assert (!context_used);
10927 g_assert (!vtable_arg);
10928 /* we simply pass a null pointer */
10929 EMIT_NEW_PCONST (cfg, *sp, NULL);
10930 /* now call the string ctor */
10931 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10933 if (cmethod->klass->valuetype) {
10934 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10935 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10936 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10941 * The code generated by mini_emit_virtual_call () expects
10942 * iargs [0] to be a boxed instance, but luckily the vcall
10943 * will be transformed into a normal call there.
10945 } else if (context_used) {
10946 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10949 MonoVTable *vtable = NULL;
10951 if (!cfg->compile_aot)
10952 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10953 CHECK_TYPELOAD (cmethod->klass);
10956 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10957 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10958 * As a workaround, we call class cctors before allocating objects.
10960 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10961 emit_class_init (cfg, cmethod->klass);
10962 if (cfg->verbose_level > 2)
10963 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10964 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10967 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10970 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10973 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10975 /* Now call the actual ctor */
10976 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10977 CHECK_CFG_EXCEPTION;
10980 if (alloc == NULL) {
10982 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10983 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10991 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10992 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10995 case CEE_CASTCLASS:
11000 token = read32 (ip + 1);
11001 klass = mini_get_class (method, token, generic_context);
11002 CHECK_TYPELOAD (klass);
11003 if (sp [0]->type != STACK_OBJ)
11006 MONO_INST_NEW (cfg, ins, *ip == CEE_ISINST ? OP_ISINST : OP_CASTCLASS);
11007 ins->dreg = alloc_preg (cfg);
11008 ins->sreg1 = (*sp)->dreg;
11009 ins->klass = klass;
11010 ins->type = STACK_OBJ;
11011 MONO_ADD_INS (cfg->cbb, ins);
11013 CHECK_CFG_EXCEPTION;
11017 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
11020 case CEE_UNBOX_ANY: {
11021 MonoInst *res, *addr;
11026 token = read32 (ip + 1);
11027 klass = mini_get_class (method, token, generic_context);
11028 CHECK_TYPELOAD (klass);
11030 mono_save_token_info (cfg, image, token, klass);
11032 context_used = mini_class_check_context_used (cfg, klass);
11034 if (mini_is_gsharedvt_klass (klass)) {
11035 res = handle_unbox_gsharedvt (cfg, klass, *sp);
11037 } else if (generic_class_is_reference_type (cfg, klass)) {
11038 MONO_INST_NEW (cfg, res, OP_CASTCLASS);
11039 res->dreg = alloc_preg (cfg);
11040 res->sreg1 = (*sp)->dreg;
11041 res->klass = klass;
11042 res->type = STACK_OBJ;
11043 MONO_ADD_INS (cfg->cbb, res);
11044 cfg->flags |= MONO_CFG_HAS_TYPE_CHECK;
11045 } else if (mono_class_is_nullable (klass)) {
11046 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
11048 addr = handle_unbox (cfg, klass, sp, context_used);
11050 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11061 MonoClass *enum_class;
11062 MonoMethod *has_flag;
11068 token = read32 (ip + 1);
11069 klass = mini_get_class (method, token, generic_context);
11070 CHECK_TYPELOAD (klass);
11072 mono_save_token_info (cfg, image, token, klass);
11074 context_used = mini_class_check_context_used (cfg, klass);
11076 if (generic_class_is_reference_type (cfg, klass)) {
11082 if (klass == mono_defaults.void_class)
11084 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
11086 /* frequent check in generic code: box (struct), brtrue */
11091 * <push int/long ptr>
11094 * constrained. MyFlags
11095 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
11097 * If we find this sequence and the operand types on box and constrained
11098 * are equal, we can emit a specialized instruction sequence instead of
11099 * the very slow HasFlag () call.
11101 if ((cfg->opt & MONO_OPT_INTRINS) &&
11102 /* Cheap checks first. */
11103 ip + 5 + 6 + 5 < end &&
11104 ip [5] == CEE_PREFIX1 &&
11105 ip [6] == CEE_CONSTRAINED_ &&
11106 ip [11] == CEE_CALLVIRT &&
11107 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
11108 mono_class_is_enum (klass) &&
11109 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
11110 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
11111 has_flag->klass == mono_defaults.enum_class &&
11112 !strcmp (has_flag->name, "HasFlag") &&
11113 has_flag->signature->hasthis &&
11114 has_flag->signature->param_count == 1) {
11115 CHECK_TYPELOAD (enum_class);
11117 if (enum_class == klass) {
11118 MonoInst *enum_this, *enum_flag;
11123 enum_this = sp [0];
11124 enum_flag = sp [1];
11126 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
11131 // FIXME: LLVM can't handle the inconsistent bb linking
11132 if (!mono_class_is_nullable (klass) &&
11133 !mini_is_gsharedvt_klass (klass) &&
11134 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11135 (ip [5] == CEE_BRTRUE ||
11136 ip [5] == CEE_BRTRUE_S ||
11137 ip [5] == CEE_BRFALSE ||
11138 ip [5] == CEE_BRFALSE_S)) {
11139 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
11141 MonoBasicBlock *true_bb, *false_bb;
11145 if (cfg->verbose_level > 3) {
11146 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11147 printf ("<box+brtrue opt>\n");
11152 case CEE_BRFALSE_S:
11155 target = ip + 1 + (signed char)(*ip);
11162 target = ip + 4 + (gint)(read32 (ip));
11166 g_assert_not_reached ();
11170 * We need to link both bblocks, since it is needed for handling stack
11171 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
11172 * Branching to only one of them would lead to inconsistencies, so
11173 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
11175 GET_BBLOCK (cfg, true_bb, target);
11176 GET_BBLOCK (cfg, false_bb, ip);
11178 mono_link_bblock (cfg, cfg->cbb, true_bb);
11179 mono_link_bblock (cfg, cfg->cbb, false_bb);
11181 if (sp != stack_start) {
11182 handle_stack_args (cfg, stack_start, sp - stack_start);
11184 CHECK_UNVERIFIABLE (cfg);
11187 if (COMPILE_LLVM (cfg)) {
11188 dreg = alloc_ireg (cfg);
11189 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
11190 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
11192 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
11194 /* The JIT can't eliminate the iconst+compare */
11195 MONO_INST_NEW (cfg, ins, OP_BR);
11196 ins->inst_target_bb = is_true ? true_bb : false_bb;
11197 MONO_ADD_INS (cfg->cbb, ins);
11200 start_new_bblock = 1;
11204 *sp++ = handle_box (cfg, val, klass, context_used);
11206 CHECK_CFG_EXCEPTION;
11215 token = read32 (ip + 1);
11216 klass = mini_get_class (method, token, generic_context);
11217 CHECK_TYPELOAD (klass);
11219 mono_save_token_info (cfg, image, token, klass);
11221 context_used = mini_class_check_context_used (cfg, klass);
11223 if (mono_class_is_nullable (klass)) {
11226 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
11227 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
11231 ins = handle_unbox (cfg, klass, sp, context_used);
11244 MonoClassField *field;
11245 #ifndef DISABLE_REMOTING
11249 gboolean is_instance;
11251 gpointer addr = NULL;
11252 gboolean is_special_static;
11254 MonoInst *store_val = NULL;
11255 MonoInst *thread_ins;
11258 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
11260 if (op == CEE_STFLD) {
11263 store_val = sp [1];
11268 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
11270 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
11273 if (op == CEE_STSFLD) {
11276 store_val = sp [0];
11281 token = read32 (ip + 1);
11282 if (method->wrapper_type != MONO_WRAPPER_NONE) {
11283 field = (MonoClassField *)mono_method_get_wrapper_data (method, token);
11284 klass = field->parent;
11287 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11290 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11291 FIELD_ACCESS_FAILURE (method, field);
11292 mono_class_init (klass);
11294 /* if the class is Critical then transparent code cannot access its fields */
11295 if (!is_instance && mono_security_core_clr_enabled ())
11296 ensure_method_is_allowed_to_access_field (cfg, method, field);
11298 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
11299 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11300 if (mono_security_core_clr_enabled ())
11301 ensure_method_is_allowed_to_access_field (cfg, method, field);
11304 ftype = mono_field_get_type (field);
11307 * LDFLD etc. is usable on static fields as well, so convert those cases to
11310 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11322 g_assert_not_reached ();
11324 is_instance = FALSE;
11327 context_used = mini_class_check_context_used (cfg, klass);
11329 /* INSTANCE CASE */
11331 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11332 if (op == CEE_STFLD) {
11333 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11335 #ifndef DISABLE_REMOTING
11336 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11337 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11338 MonoInst *iargs [5];
11340 GSHAREDVT_FAILURE (op);
11342 iargs [0] = sp [0];
11343 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11344 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11345 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11347 iargs [4] = sp [1];
11349 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11350 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11351 iargs, ip, cfg->real_offset, TRUE);
11352 CHECK_CFG_EXCEPTION;
11353 g_assert (costs > 0);
11355 cfg->real_offset += 5;
11357 inline_costs += costs;
11359 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11364 MonoInst *store, *wbarrier_ptr_ins = NULL;
11366 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11368 if (ins_flag & MONO_INST_VOLATILE) {
11369 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11370 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11373 if (mini_is_gsharedvt_klass (klass)) {
11374 MonoInst *offset_ins;
11376 context_used = mini_class_check_context_used (cfg, klass);
11378 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11379 /* The value is offset by 1 */
11380 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11381 dreg = alloc_ireg_mp (cfg);
11382 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11383 wbarrier_ptr_ins = ins;
11384 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11385 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11387 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11389 if (sp [0]->opcode != OP_LDADDR)
11390 store->flags |= MONO_INST_FAULT;
11392 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11393 if (mini_is_gsharedvt_klass (klass)) {
11394 g_assert (wbarrier_ptr_ins);
11395 emit_write_barrier (cfg, wbarrier_ptr_ins, sp [1]);
11397 /* insert call to write barrier */
11401 dreg = alloc_ireg_mp (cfg);
11402 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11403 emit_write_barrier (cfg, ptr, sp [1]);
11407 store->flags |= ins_flag;
11414 #ifndef DISABLE_REMOTING
11415 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11416 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11417 MonoInst *iargs [4];
11419 GSHAREDVT_FAILURE (op);
11421 iargs [0] = sp [0];
11422 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11423 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11424 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11425 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11426 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11427 iargs, ip, cfg->real_offset, TRUE);
11428 CHECK_CFG_EXCEPTION;
11429 g_assert (costs > 0);
11431 cfg->real_offset += 5;
11435 inline_costs += costs;
11437 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11443 if (sp [0]->type == STACK_VTYPE) {
11446 /* Have to compute the address of the variable */
11448 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11450 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11452 g_assert (var->klass == klass);
11454 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11458 if (op == CEE_LDFLDA) {
11459 if (sp [0]->type == STACK_OBJ) {
11460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11461 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11464 dreg = alloc_ireg_mp (cfg);
11466 if (mini_is_gsharedvt_klass (klass)) {
11467 MonoInst *offset_ins;
11469 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11470 /* The value is offset by 1 */
11471 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11472 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11474 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11476 ins->klass = mono_class_from_mono_type (field->type);
11477 ins->type = STACK_MP;
11482 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11484 if (mini_is_gsharedvt_klass (klass)) {
11485 MonoInst *offset_ins;
11487 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11488 /* The value is offset by 1 */
11489 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11490 dreg = alloc_ireg_mp (cfg);
11491 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11492 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11494 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11496 load->flags |= ins_flag;
11497 if (sp [0]->opcode != OP_LDADDR)
11498 load->flags |= MONO_INST_FAULT;
11510 context_used = mini_class_check_context_used (cfg, klass);
11512 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL) {
11513 mono_error_set_field_load (&cfg->error, field->parent, field->name, "Using static instructions with literal field");
11517 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11518 * to be called here.
11520 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11521 mono_class_vtable (cfg->domain, klass);
11522 CHECK_TYPELOAD (klass);
11524 mono_domain_lock (cfg->domain);
11525 if (cfg->domain->special_static_fields)
11526 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11527 mono_domain_unlock (cfg->domain);
11529 is_special_static = mono_class_field_is_special_static (field);
11531 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11532 thread_ins = mono_get_thread_intrinsic (cfg);
11536 /* Generate IR to compute the field address */
11537 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11539 * Fast access to TLS data
11540 * Inline version of get_thread_static_data () in
11544 int idx, static_data_reg, array_reg, dreg;
11546 GSHAREDVT_FAILURE (op);
11548 MONO_ADD_INS (cfg->cbb, thread_ins);
11549 static_data_reg = alloc_ireg (cfg);
11550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11552 if (cfg->compile_aot) {
11553 int offset_reg, offset2_reg, idx_reg;
11555 /* For TLS variables, this will return the TLS offset */
11556 EMIT_NEW_SFLDACONST (cfg, ins, field);
11557 offset_reg = ins->dreg;
11558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11559 idx_reg = alloc_ireg (cfg);
11560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11562 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11563 array_reg = alloc_ireg (cfg);
11564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11565 offset2_reg = alloc_ireg (cfg);
11566 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11567 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11568 dreg = alloc_ireg (cfg);
11569 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11571 offset = (gsize)addr & 0x7fffffff;
11572 idx = offset & 0x3f;
11574 array_reg = alloc_ireg (cfg);
11575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11576 dreg = alloc_ireg (cfg);
11577 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11579 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11580 (cfg->compile_aot && is_special_static) ||
11581 (context_used && is_special_static)) {
11582 MonoInst *iargs [2];
11584 g_assert (field->parent);
11585 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11586 if (context_used) {
11587 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11588 field, MONO_RGCTX_INFO_CLASS_FIELD);
11590 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11592 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11593 } else if (context_used) {
11594 MonoInst *static_data;
11597 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11598 method->klass->name_space, method->klass->name, method->name,
11599 depth, field->offset);
11602 if (mono_class_needs_cctor_run (klass, method))
11603 emit_class_init (cfg, klass);
11606 * The pointer we're computing here is
11608 * super_info.static_data + field->offset
11610 static_data = emit_get_rgctx_klass (cfg, context_used,
11611 klass, MONO_RGCTX_INFO_STATIC_DATA);
11613 if (mini_is_gsharedvt_klass (klass)) {
11614 MonoInst *offset_ins;
11616 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11617 /* The value is offset by 1 */
11618 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11619 dreg = alloc_ireg_mp (cfg);
11620 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11621 } else if (field->offset == 0) {
11624 int addr_reg = mono_alloc_preg (cfg);
11625 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11627 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11628 MonoInst *iargs [2];
11630 g_assert (field->parent);
11631 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11632 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11633 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11635 MonoVTable *vtable = NULL;
11637 if (!cfg->compile_aot)
11638 vtable = mono_class_vtable (cfg->domain, klass);
11639 CHECK_TYPELOAD (klass);
11642 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11643 if (!(g_slist_find (class_inits, klass))) {
11644 emit_class_init (cfg, klass);
11645 if (cfg->verbose_level > 2)
11646 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11647 class_inits = g_slist_prepend (class_inits, klass);
11650 if (cfg->run_cctors) {
11651 /* This makes it so that inlining cannot trigger */
11652 /* .cctors: too many apps depend on them */
11653 /* running with a specific order... */
11655 if (! vtable->initialized)
11656 INLINE_FAILURE ("class init");
11657 if (!mono_runtime_class_init_full (vtable, &cfg->error)) {
11658 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
11659 goto exception_exit;
11663 if (cfg->compile_aot)
11664 EMIT_NEW_SFLDACONST (cfg, ins, field);
11667 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11669 EMIT_NEW_PCONST (cfg, ins, addr);
11672 MonoInst *iargs [1];
11673 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11674 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11678 /* Generate IR to do the actual load/store operation */
11680 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11681 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11682 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11685 if (op == CEE_LDSFLDA) {
11686 ins->klass = mono_class_from_mono_type (ftype);
11687 ins->type = STACK_PTR;
11689 } else if (op == CEE_STSFLD) {
11692 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11693 store->flags |= ins_flag;
11695 gboolean is_const = FALSE;
11696 MonoVTable *vtable = NULL;
11697 gpointer addr = NULL;
11699 if (!context_used) {
11700 vtable = mono_class_vtable (cfg->domain, klass);
11701 CHECK_TYPELOAD (klass);
11703 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11704 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11705 int ro_type = ftype->type;
11707 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11708 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11709 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11712 GSHAREDVT_FAILURE (op);
11714 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11717 case MONO_TYPE_BOOLEAN:
11719 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11723 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11726 case MONO_TYPE_CHAR:
11728 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11732 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11737 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11741 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11746 case MONO_TYPE_PTR:
11747 case MONO_TYPE_FNPTR:
11748 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11749 type_to_eval_stack_type ((cfg), field->type, *sp);
11752 case MONO_TYPE_STRING:
11753 case MONO_TYPE_OBJECT:
11754 case MONO_TYPE_CLASS:
11755 case MONO_TYPE_SZARRAY:
11756 case MONO_TYPE_ARRAY:
11757 if (!mono_gc_is_moving ()) {
11758 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11759 type_to_eval_stack_type ((cfg), field->type, *sp);
11767 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11772 case MONO_TYPE_VALUETYPE:
11782 CHECK_STACK_OVF (1);
11784 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11785 load->flags |= ins_flag;
11791 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11792 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11793 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11804 token = read32 (ip + 1);
11805 klass = mini_get_class (method, token, generic_context);
11806 CHECK_TYPELOAD (klass);
11807 if (ins_flag & MONO_INST_VOLATILE) {
11808 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11809 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11811 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11812 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11813 ins->flags |= ins_flag;
11814 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11815 generic_class_is_reference_type (cfg, klass)) {
11816 /* insert call to write barrier */
11817 emit_write_barrier (cfg, sp [0], sp [1]);
11829 const char *data_ptr;
11831 guint32 field_token;
11837 token = read32 (ip + 1);
11839 klass = mini_get_class (method, token, generic_context);
11840 CHECK_TYPELOAD (klass);
11842 context_used = mini_class_check_context_used (cfg, klass);
11844 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11845 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11846 ins->sreg1 = sp [0]->dreg;
11847 ins->type = STACK_I4;
11848 ins->dreg = alloc_ireg (cfg);
11849 MONO_ADD_INS (cfg->cbb, ins);
11850 *sp = mono_decompose_opcode (cfg, ins);
11853 if (context_used) {
11854 MonoInst *args [3];
11855 MonoClass *array_class = mono_array_class_get (klass, 1);
11856 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11858 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11861 args [0] = emit_get_rgctx_klass (cfg, context_used,
11862 array_class, MONO_RGCTX_INFO_VTABLE);
11867 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11869 ins = mono_emit_jit_icall (cfg, ves_icall_array_new_specific, args);
11871 if (cfg->opt & MONO_OPT_SHARED) {
11872 /* Decompose now to avoid problems with references to the domainvar */
11873 MonoInst *iargs [3];
11875 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11876 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11877 iargs [2] = sp [0];
11879 ins = mono_emit_jit_icall (cfg, ves_icall_array_new, iargs);
11881 /* Decompose later since it is needed by abcrem */
11882 MonoClass *array_type = mono_array_class_get (klass, 1);
11883 mono_class_vtable (cfg->domain, array_type);
11884 CHECK_TYPELOAD (array_type);
11886 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11887 ins->dreg = alloc_ireg_ref (cfg);
11888 ins->sreg1 = sp [0]->dreg;
11889 ins->inst_newa_class = klass;
11890 ins->type = STACK_OBJ;
11891 ins->klass = array_type;
11892 MONO_ADD_INS (cfg->cbb, ins);
11893 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11894 cfg->cbb->has_array_access = TRUE;
11896 /* Needed so mono_emit_load_get_addr () gets called */
11897 mono_get_got_var (cfg);
11907 * we inline/optimize the initialization sequence if possible.
11908 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11909 * for small sizes open code the memcpy
11910 * ensure the rva field is big enough
11912 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11913 MonoMethod *memcpy_method = get_memcpy_method ();
11914 MonoInst *iargs [3];
11915 int add_reg = alloc_ireg_mp (cfg);
11917 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11918 if (cfg->compile_aot) {
11919 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11921 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11923 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11924 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11933 if (sp [0]->type != STACK_OBJ)
11936 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11937 ins->dreg = alloc_preg (cfg);
11938 ins->sreg1 = sp [0]->dreg;
11939 ins->type = STACK_I4;
11940 /* This flag will be inherited by the decomposition */
11941 ins->flags |= MONO_INST_FAULT;
11942 MONO_ADD_INS (cfg->cbb, ins);
11943 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11944 cfg->cbb->has_array_access = TRUE;
11952 if (sp [0]->type != STACK_OBJ)
11955 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11957 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11958 CHECK_TYPELOAD (klass);
11959 /* we need to make sure that this array is exactly the type it needs
11960 * to be for correctness. the wrappers are lax with their usage
11961 * so we need to ignore them here
11963 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11964 MonoClass *array_class = mono_array_class_get (klass, 1);
11965 mini_emit_check_array_type (cfg, sp [0], array_class);
11966 CHECK_TYPELOAD (array_class);
11970 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11975 case CEE_LDELEM_I1:
11976 case CEE_LDELEM_U1:
11977 case CEE_LDELEM_I2:
11978 case CEE_LDELEM_U2:
11979 case CEE_LDELEM_I4:
11980 case CEE_LDELEM_U4:
11981 case CEE_LDELEM_I8:
11983 case CEE_LDELEM_R4:
11984 case CEE_LDELEM_R8:
11985 case CEE_LDELEM_REF: {
11991 if (*ip == CEE_LDELEM) {
11993 token = read32 (ip + 1);
11994 klass = mini_get_class (method, token, generic_context);
11995 CHECK_TYPELOAD (klass);
11996 mono_class_init (klass);
11999 klass = array_access_to_klass (*ip);
12001 if (sp [0]->type != STACK_OBJ)
12004 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12006 if (mini_is_gsharedvt_variable_klass (klass)) {
12007 // FIXME-VT: OP_ICONST optimization
12008 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12009 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12010 ins->opcode = OP_LOADV_MEMBASE;
12011 } else if (sp [1]->opcode == OP_ICONST) {
12012 int array_reg = sp [0]->dreg;
12013 int index_reg = sp [1]->dreg;
12014 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
12016 if (SIZEOF_REGISTER == 8 && COMPILE_LLVM (cfg))
12017 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, index_reg, index_reg);
12019 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
12020 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
12022 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
12023 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
12026 if (*ip == CEE_LDELEM)
12033 case CEE_STELEM_I1:
12034 case CEE_STELEM_I2:
12035 case CEE_STELEM_I4:
12036 case CEE_STELEM_I8:
12037 case CEE_STELEM_R4:
12038 case CEE_STELEM_R8:
12039 case CEE_STELEM_REF:
12044 cfg->flags |= MONO_CFG_HAS_LDELEMA;
12046 if (*ip == CEE_STELEM) {
12048 token = read32 (ip + 1);
12049 klass = mini_get_class (method, token, generic_context);
12050 CHECK_TYPELOAD (klass);
12051 mono_class_init (klass);
12054 klass = array_access_to_klass (*ip);
12056 if (sp [0]->type != STACK_OBJ)
12059 emit_array_store (cfg, klass, sp, TRUE);
12061 if (*ip == CEE_STELEM)
12068 case CEE_CKFINITE: {
12072 if (cfg->llvm_only) {
12073 MonoInst *iargs [1];
12075 iargs [0] = sp [0];
12076 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
12078 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
12079 ins->sreg1 = sp [0]->dreg;
12080 ins->dreg = alloc_freg (cfg);
12081 ins->type = STACK_R8;
12082 MONO_ADD_INS (cfg->cbb, ins);
12084 *sp++ = mono_decompose_opcode (cfg, ins);
12090 case CEE_REFANYVAL: {
12091 MonoInst *src_var, *src;
12093 int klass_reg = alloc_preg (cfg);
12094 int dreg = alloc_preg (cfg);
12096 GSHAREDVT_FAILURE (*ip);
12099 MONO_INST_NEW (cfg, ins, *ip);
12102 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12103 CHECK_TYPELOAD (klass);
12105 context_used = mini_class_check_context_used (cfg, klass);
12108 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12110 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12111 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12112 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
12114 if (context_used) {
12115 MonoInst *klass_ins;
12117 klass_ins = emit_get_rgctx_klass (cfg, context_used,
12118 klass, MONO_RGCTX_INFO_KLASS);
12121 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
12122 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
12124 mini_emit_class_check (cfg, klass_reg, klass);
12126 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
12127 ins->type = STACK_MP;
12128 ins->klass = klass;
12133 case CEE_MKREFANY: {
12134 MonoInst *loc, *addr;
12136 GSHAREDVT_FAILURE (*ip);
12139 MONO_INST_NEW (cfg, ins, *ip);
12142 klass = mini_get_class (method, read32 (ip + 1), generic_context);
12143 CHECK_TYPELOAD (klass);
12145 context_used = mini_class_check_context_used (cfg, klass);
12147 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
12148 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
12150 if (context_used) {
12151 MonoInst *const_ins;
12152 int type_reg = alloc_preg (cfg);
12154 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
12155 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
12156 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12157 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12158 } else if (cfg->compile_aot) {
12159 int const_reg = alloc_preg (cfg);
12160 int type_reg = alloc_preg (cfg);
12162 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
12163 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
12164 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
12165 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
12167 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
12168 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
12170 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
12172 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
12173 ins->type = STACK_VTYPE;
12174 ins->klass = mono_defaults.typed_reference_class;
12179 case CEE_LDTOKEN: {
12181 MonoClass *handle_class;
12183 CHECK_STACK_OVF (1);
12186 n = read32 (ip + 1);
12188 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
12189 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
12190 handle = mono_method_get_wrapper_data (method, n);
12191 handle_class = (MonoClass *)mono_method_get_wrapper_data (method, n + 1);
12192 if (handle_class == mono_defaults.typehandle_class)
12193 handle = &((MonoClass*)handle)->byval_arg;
12196 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
12201 mono_class_init (handle_class);
12202 if (cfg->gshared) {
12203 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
12204 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
12205 /* This case handles ldtoken
12206 of an open type, like for
12209 } else if (handle_class == mono_defaults.typehandle_class) {
12210 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type ((MonoType *)handle));
12211 } else if (handle_class == mono_defaults.fieldhandle_class)
12212 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
12213 else if (handle_class == mono_defaults.methodhandle_class)
12214 context_used = mini_method_check_context_used (cfg, (MonoMethod *)handle);
12216 g_assert_not_reached ();
12219 if ((cfg->opt & MONO_OPT_SHARED) &&
12220 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
12221 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
12222 MonoInst *addr, *vtvar, *iargs [3];
12223 int method_context_used;
12225 method_context_used = mini_method_check_context_used (cfg, method);
12227 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12229 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
12230 EMIT_NEW_ICONST (cfg, iargs [1], n);
12231 if (method_context_used) {
12232 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
12233 method, MONO_RGCTX_INFO_METHOD);
12234 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
12236 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
12237 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
12239 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12241 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12243 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12245 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
12246 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
12247 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
12248 (cmethod->klass == mono_defaults.systemtype_class) &&
12249 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
12250 MonoClass *tclass = mono_class_from_mono_type ((MonoType *)handle);
12252 mono_class_init (tclass);
12253 if (context_used) {
12254 ins = emit_get_rgctx_klass (cfg, context_used,
12255 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
12256 } else if (cfg->compile_aot) {
12257 if (method->wrapper_type) {
12258 mono_error_init (&error); //got to do it since there are multiple conditionals below
12259 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
12260 /* Special case for static synchronized wrappers */
12261 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
12263 mono_error_cleanup (&error); /* FIXME don't swallow the error */
12264 /* FIXME: n is not a normal token */
12266 EMIT_NEW_PCONST (cfg, ins, NULL);
12269 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
12272 MonoReflectionType *rt = mono_type_get_object_checked (cfg->domain, (MonoType *)handle, &cfg->error);
12274 EMIT_NEW_PCONST (cfg, ins, rt);
12276 ins->type = STACK_OBJ;
12277 ins->klass = cmethod->klass;
12280 MonoInst *addr, *vtvar;
12282 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
12284 if (context_used) {
12285 if (handle_class == mono_defaults.typehandle_class) {
12286 ins = emit_get_rgctx_klass (cfg, context_used,
12287 mono_class_from_mono_type ((MonoType *)handle),
12288 MONO_RGCTX_INFO_TYPE);
12289 } else if (handle_class == mono_defaults.methodhandle_class) {
12290 ins = emit_get_rgctx_method (cfg, context_used,
12291 (MonoMethod *)handle, MONO_RGCTX_INFO_METHOD);
12292 } else if (handle_class == mono_defaults.fieldhandle_class) {
12293 ins = emit_get_rgctx_field (cfg, context_used,
12294 (MonoClassField *)handle, MONO_RGCTX_INFO_CLASS_FIELD);
12296 g_assert_not_reached ();
12298 } else if (cfg->compile_aot) {
12299 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
12301 EMIT_NEW_PCONST (cfg, ins, handle);
12303 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12304 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
12305 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12315 if (sp [-1]->type != STACK_OBJ)
12318 MONO_INST_NEW (cfg, ins, OP_THROW);
12320 ins->sreg1 = sp [0]->dreg;
12322 cfg->cbb->out_of_line = TRUE;
12323 MONO_ADD_INS (cfg->cbb, ins);
12324 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12325 MONO_ADD_INS (cfg->cbb, ins);
12328 link_bblock (cfg, cfg->cbb, end_bblock);
12329 start_new_bblock = 1;
12330 /* This can complicate code generation for llvm since the return value might not be defined */
12331 if (COMPILE_LLVM (cfg))
12332 INLINE_FAILURE ("throw");
12334 case CEE_ENDFINALLY:
12335 if (!ip_in_finally_clause (cfg, ip - header->code))
12337 /* mono_save_seq_point_info () depends on this */
12338 if (sp != stack_start)
12339 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12340 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12341 MONO_ADD_INS (cfg->cbb, ins);
12343 start_new_bblock = 1;
12346 * Control will leave the method so empty the stack, otherwise
12347 * the next basic block will start with a nonempty stack.
12349 while (sp != stack_start) {
12354 case CEE_LEAVE_S: {
12357 if (*ip == CEE_LEAVE) {
12359 target = ip + 5 + (gint32)read32(ip + 1);
12362 target = ip + 2 + (signed char)(ip [1]);
12365 /* empty the stack */
12366 while (sp != stack_start) {
12371 * If this leave statement is in a catch block, check for a
12372 * pending exception, and rethrow it if necessary.
12373 * We avoid doing this in runtime invoke wrappers, since those are called
12374 * by native code which expects the wrapper to catch all exceptions.
12376 for (i = 0; i < header->num_clauses; ++i) {
12377 MonoExceptionClause *clause = &header->clauses [i];
12380 * Use <= in the final comparison to handle clauses with multiple
12381 * leave statements, like in bug #78024.
12382 * The ordering of the exception clauses guarantees that we find the
12383 * innermost clause.
12385 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12387 MonoBasicBlock *dont_throw;
12392 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12395 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12397 NEW_BBLOCK (cfg, dont_throw);
12400 * Currently, we always rethrow the abort exception, despite the
12401 * fact that this is not correct. See thread6.cs for an example.
12402 * But propagating the abort exception is more important than
12403 * getting the semantics right.
12405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12406 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12407 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12409 MONO_START_BB (cfg, dont_throw);
12414 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12417 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12419 MonoExceptionClause *clause;
12421 for (tmp = handlers; tmp; tmp = tmp->next) {
12422 clause = (MonoExceptionClause *)tmp->data;
12423 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12425 link_bblock (cfg, cfg->cbb, tblock);
12426 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12427 ins->inst_target_bb = tblock;
12428 ins->inst_eh_block = clause;
12429 MONO_ADD_INS (cfg->cbb, ins);
12430 cfg->cbb->has_call_handler = 1;
12431 if (COMPILE_LLVM (cfg)) {
12432 MonoBasicBlock *target_bb;
12435 * Link the finally bblock with the target, since it will
12436 * conceptually branch there.
12438 GET_BBLOCK (cfg, tblock, cfg->cil_start + clause->handler_offset + clause->handler_len - 1);
12439 GET_BBLOCK (cfg, target_bb, target);
12440 link_bblock (cfg, tblock, target_bb);
12443 g_list_free (handlers);
12446 MONO_INST_NEW (cfg, ins, OP_BR);
12447 MONO_ADD_INS (cfg->cbb, ins);
12448 GET_BBLOCK (cfg, tblock, target);
12449 link_bblock (cfg, cfg->cbb, tblock);
12450 ins->inst_target_bb = tblock;
12452 start_new_bblock = 1;
12454 if (*ip == CEE_LEAVE)
12463 * Mono specific opcodes
12465 case MONO_CUSTOM_PREFIX: {
12467 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12471 case CEE_MONO_ICALL: {
12473 MonoJitICallInfo *info;
12475 token = read32 (ip + 2);
12476 func = mono_method_get_wrapper_data (method, token);
12477 info = mono_find_jit_icall_by_addr (func);
12479 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12482 CHECK_STACK (info->sig->param_count);
12483 sp -= info->sig->param_count;
12485 ins = mono_emit_jit_icall (cfg, info->func, sp);
12486 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12490 inline_costs += 10 * num_calls++;
12494 case CEE_MONO_LDPTR_CARD_TABLE:
12495 case CEE_MONO_LDPTR_NURSERY_START:
12496 case CEE_MONO_LDPTR_NURSERY_BITS:
12497 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12498 CHECK_STACK_OVF (1);
12501 case CEE_MONO_LDPTR_CARD_TABLE:
12502 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12504 case CEE_MONO_LDPTR_NURSERY_START:
12505 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12507 case CEE_MONO_LDPTR_NURSERY_BITS:
12508 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12510 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12511 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12517 inline_costs += 10 * num_calls++;
12520 case CEE_MONO_LDPTR: {
12523 CHECK_STACK_OVF (1);
12525 token = read32 (ip + 2);
12527 ptr = mono_method_get_wrapper_data (method, token);
12528 EMIT_NEW_PCONST (cfg, ins, ptr);
12531 inline_costs += 10 * num_calls++;
12532 /* Can't embed random pointers into AOT code */
12536 case CEE_MONO_JIT_ICALL_ADDR: {
12537 MonoJitICallInfo *callinfo;
12540 CHECK_STACK_OVF (1);
12542 token = read32 (ip + 2);
12544 ptr = mono_method_get_wrapper_data (method, token);
12545 callinfo = mono_find_jit_icall_by_addr (ptr);
12546 g_assert (callinfo);
12547 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12550 inline_costs += 10 * num_calls++;
12553 case CEE_MONO_ICALL_ADDR: {
12554 MonoMethod *cmethod;
12557 CHECK_STACK_OVF (1);
12559 token = read32 (ip + 2);
12561 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
12563 if (cfg->compile_aot) {
12564 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12566 ptr = mono_lookup_internal_call (cmethod);
12568 EMIT_NEW_PCONST (cfg, ins, ptr);
12574 case CEE_MONO_VTADDR: {
12575 MonoInst *src_var, *src;
12581 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12582 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12587 case CEE_MONO_NEWOBJ: {
12588 MonoInst *iargs [2];
12590 CHECK_STACK_OVF (1);
12592 token = read32 (ip + 2);
12593 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12594 mono_class_init (klass);
12595 NEW_DOMAINCONST (cfg, iargs [0]);
12596 MONO_ADD_INS (cfg->cbb, iargs [0]);
12597 NEW_CLASSCONST (cfg, iargs [1], klass);
12598 MONO_ADD_INS (cfg->cbb, iargs [1]);
12599 *sp++ = mono_emit_jit_icall (cfg, ves_icall_object_new, iargs);
12601 inline_costs += 10 * num_calls++;
12604 case CEE_MONO_OBJADDR:
12607 MONO_INST_NEW (cfg, ins, OP_MOVE);
12608 ins->dreg = alloc_ireg_mp (cfg);
12609 ins->sreg1 = sp [0]->dreg;
12610 ins->type = STACK_MP;
12611 MONO_ADD_INS (cfg->cbb, ins);
12615 case CEE_MONO_LDNATIVEOBJ:
12617 * Similar to LDOBJ, but instead load the unmanaged
12618 * representation of the vtype to the stack.
12623 token = read32 (ip + 2);
12624 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12625 g_assert (klass->valuetype);
12626 mono_class_init (klass);
12629 MonoInst *src, *dest, *temp;
12632 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12633 temp->backend.is_pinvoke = 1;
12634 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12635 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12637 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12638 dest->type = STACK_VTYPE;
12639 dest->klass = klass;
12645 case CEE_MONO_RETOBJ: {
12647 * Same as RET, but return the native representation of a vtype
12650 g_assert (cfg->ret);
12651 g_assert (mono_method_signature (method)->pinvoke);
12656 token = read32 (ip + 2);
12657 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12659 if (!cfg->vret_addr) {
12660 g_assert (cfg->ret_var_is_local);
12662 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12664 EMIT_NEW_RETLOADA (cfg, ins);
12666 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12668 if (sp != stack_start)
12671 MONO_INST_NEW (cfg, ins, OP_BR);
12672 ins->inst_target_bb = end_bblock;
12673 MONO_ADD_INS (cfg->cbb, ins);
12674 link_bblock (cfg, cfg->cbb, end_bblock);
12675 start_new_bblock = 1;
12679 case CEE_MONO_CISINST:
12680 case CEE_MONO_CCASTCLASS: {
12685 token = read32 (ip + 2);
12686 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12687 if (ip [1] == CEE_MONO_CISINST)
12688 ins = handle_cisinst (cfg, klass, sp [0]);
12690 ins = handle_ccastclass (cfg, klass, sp [0]);
12695 case CEE_MONO_SAVE_LMF:
12696 case CEE_MONO_RESTORE_LMF:
12699 case CEE_MONO_CLASSCONST:
12700 CHECK_STACK_OVF (1);
12702 token = read32 (ip + 2);
12703 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12706 inline_costs += 10 * num_calls++;
12708 case CEE_MONO_NOT_TAKEN:
12709 cfg->cbb->out_of_line = TRUE;
12712 case CEE_MONO_TLS: {
12715 CHECK_STACK_OVF (1);
12717 key = (MonoTlsKey)read32 (ip + 2);
12718 g_assert (key < TLS_KEY_NUM);
12720 ins = mono_create_tls_get (cfg, key);
12722 if (cfg->compile_aot) {
12724 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12725 ins->dreg = alloc_preg (cfg);
12726 ins->type = STACK_PTR;
12728 g_assert_not_reached ();
12731 ins->type = STACK_PTR;
12732 MONO_ADD_INS (cfg->cbb, ins);
12737 case CEE_MONO_DYN_CALL: {
12738 MonoCallInst *call;
12740 /* It would be easier to call a trampoline, but that would put an
12741 * extra frame on the stack, confusing exception handling. So
12742 * implement it inline using an opcode for now.
12745 if (!cfg->dyn_call_var) {
12746 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12747 /* prevent it from being register allocated */
12748 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12751 /* Has to use a call inst since the local regalloc expects it */
12752 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12753 ins = (MonoInst*)call;
12755 ins->sreg1 = sp [0]->dreg;
12756 ins->sreg2 = sp [1]->dreg;
12757 MONO_ADD_INS (cfg->cbb, ins);
12759 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12762 inline_costs += 10 * num_calls++;
12766 case CEE_MONO_MEMORY_BARRIER: {
12768 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12772 case CEE_MONO_ATOMIC_STORE_I4: {
12773 g_assert (mono_arch_opcode_supported (OP_ATOMIC_STORE_I4));
12779 MONO_INST_NEW (cfg, ins, OP_ATOMIC_STORE_I4);
12780 ins->dreg = sp [0]->dreg;
12781 ins->sreg1 = sp [1]->dreg;
12782 ins->backend.memory_barrier_kind = (int) read32 (ip + 2);
12783 MONO_ADD_INS (cfg->cbb, ins);
12788 case CEE_MONO_JIT_ATTACH: {
12789 MonoInst *args [16], *domain_ins;
12790 MonoInst *ad_ins, *jit_tls_ins;
12791 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12793 g_assert (!mono_threads_is_coop_enabled ());
12795 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12797 EMIT_NEW_PCONST (cfg, ins, NULL);
12798 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12800 ad_ins = mono_get_domain_intrinsic (cfg);
12801 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12803 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12804 NEW_BBLOCK (cfg, next_bb);
12805 NEW_BBLOCK (cfg, call_bb);
12807 if (cfg->compile_aot) {
12808 /* AOT code is only used in the root domain */
12809 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12811 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12813 MONO_ADD_INS (cfg->cbb, ad_ins);
12814 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12815 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12817 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12818 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12819 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12821 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12822 MONO_START_BB (cfg, call_bb);
12825 /* AOT code is only used in the root domain */
12826 EMIT_NEW_PCONST (cfg, args [0], cfg->compile_aot ? NULL : cfg->domain);
12827 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12828 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12831 MONO_START_BB (cfg, next_bb);
12837 case CEE_MONO_JIT_DETACH: {
12838 MonoInst *args [16];
12840 /* Restore the original domain */
12841 dreg = alloc_ireg (cfg);
12842 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12843 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12847 case CEE_MONO_CALLI_EXTRA_ARG: {
12849 MonoMethodSignature *fsig;
12853 * This is the same as CEE_CALLI, but passes an additional argument
12854 * to the called method in llvmonly mode.
12855 * This is only used by delegate invoke wrappers to call the
12856 * actual delegate method.
12858 g_assert (method->wrapper_type == MONO_WRAPPER_DELEGATE_INVOKE);
12861 token = read32 (ip + 2);
12869 fsig = mini_get_signature (method, token, generic_context, &cfg->error);
12872 if (cfg->llvm_only)
12873 cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, fsig);
12875 n = fsig->param_count + fsig->hasthis + 1;
12882 if (cfg->llvm_only) {
12884 * The lowest bit of 'arg' determines whenever the callee uses the gsharedvt
12885 * cconv. This is set by mono_init_delegate ().
12887 if (cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig)) {
12888 MonoInst *callee = addr;
12889 MonoInst *call, *localloc_ins;
12890 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12891 int low_bit_reg = alloc_preg (cfg);
12893 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12894 NEW_BBLOCK (cfg, end_bb);
12896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12898 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12900 /* Normal case: callee uses a normal cconv, have to add an out wrapper */
12901 addr = emit_get_rgctx_sig (cfg, context_used,
12902 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12904 * ADDR points to a gsharedvt-out wrapper, have to pass <callee, arg> as an extra arg.
12906 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12907 ins->dreg = alloc_preg (cfg);
12908 ins->inst_imm = 2 * SIZEOF_VOID_P;
12909 MONO_ADD_INS (cfg->cbb, ins);
12910 localloc_ins = ins;
12911 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12912 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12913 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12915 call = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12916 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12918 /* Gsharedvt case: callee uses a gsharedvt cconv, no conversion is needed */
12919 MONO_START_BB (cfg, is_gsharedvt_bb);
12920 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12921 ins = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12922 ins->dreg = call->dreg;
12924 MONO_START_BB (cfg, end_bb);
12926 /* Caller uses a normal calling conv */
12928 MonoInst *callee = addr;
12929 MonoInst *call, *localloc_ins;
12930 MonoBasicBlock *is_gsharedvt_bb, *end_bb;
12931 int low_bit_reg = alloc_preg (cfg);
12933 NEW_BBLOCK (cfg, is_gsharedvt_bb);
12934 NEW_BBLOCK (cfg, end_bb);
12936 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, low_bit_reg, arg->dreg, 1);
12937 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, low_bit_reg, 0);
12938 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, is_gsharedvt_bb);
12940 /* Normal case: callee uses a normal cconv, no conversion is needed */
12941 call = emit_extra_arg_calli (cfg, fsig, sp, arg->dreg, callee);
12942 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12943 /* Gsharedvt case: callee uses a gsharedvt cconv, have to add an in wrapper */
12944 MONO_START_BB (cfg, is_gsharedvt_bb);
12945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PXOR_IMM, arg->dreg, arg->dreg, 1);
12946 NEW_AOTCONST (cfg, addr, MONO_PATCH_INFO_GSHAREDVT_IN_WRAPPER, fsig);
12947 MONO_ADD_INS (cfg->cbb, addr);
12949 * ADDR points to a gsharedvt-in wrapper, have to pass <callee, arg> as an extra arg.
12951 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
12952 ins->dreg = alloc_preg (cfg);
12953 ins->inst_imm = 2 * SIZEOF_VOID_P;
12954 MONO_ADD_INS (cfg->cbb, ins);
12955 localloc_ins = ins;
12956 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12957 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, 0, callee->dreg);
12958 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, localloc_ins->dreg, SIZEOF_VOID_P, arg->dreg);
12960 ins = emit_extra_arg_calli (cfg, fsig, sp, localloc_ins->dreg, addr);
12961 ins->dreg = call->dreg;
12962 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
12964 MONO_START_BB (cfg, end_bb);
12967 /* Same as CEE_CALLI */
12968 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
12970 * We pass the address to the gsharedvt trampoline in the rgctx reg
12972 MonoInst *callee = addr;
12974 addr = emit_get_rgctx_sig (cfg, context_used,
12975 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
12976 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
12978 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
12982 if (!MONO_TYPE_IS_VOID (fsig->ret))
12983 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
12985 CHECK_CFG_EXCEPTION;
12989 constrained_class = NULL;
12992 case CEE_MONO_LDDOMAIN:
12993 CHECK_STACK_OVF (1);
12994 EMIT_NEW_PCONST (cfg, ins, cfg->compile_aot ? NULL : cfg->domain);
12998 case CEE_MONO_GET_LAST_ERROR:
13000 CHECK_STACK_OVF (1);
13002 MONO_INST_NEW (cfg, ins, OP_GET_LAST_ERROR);
13003 ins->dreg = alloc_dreg (cfg, STACK_I4);
13004 ins->type = STACK_I4;
13005 MONO_ADD_INS (cfg->cbb, ins);
13011 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
13017 case CEE_PREFIX1: {
13020 case CEE_ARGLIST: {
13021 /* somewhat similar to LDTOKEN */
13022 MonoInst *addr, *vtvar;
13023 CHECK_STACK_OVF (1);
13024 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
13026 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
13027 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
13029 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
13030 ins->type = STACK_VTYPE;
13031 ins->klass = mono_defaults.argumenthandle_class;
13041 MonoInst *cmp, *arg1, *arg2;
13049 * The following transforms:
13050 * CEE_CEQ into OP_CEQ
13051 * CEE_CGT into OP_CGT
13052 * CEE_CGT_UN into OP_CGT_UN
13053 * CEE_CLT into OP_CLT
13054 * CEE_CLT_UN into OP_CLT_UN
13056 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
13058 MONO_INST_NEW (cfg, ins, cmp->opcode);
13059 cmp->sreg1 = arg1->dreg;
13060 cmp->sreg2 = arg2->dreg;
13061 type_from_op (cfg, cmp, arg1, arg2);
13063 add_widen_op (cfg, cmp, &arg1, &arg2);
13064 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
13065 cmp->opcode = OP_LCOMPARE;
13066 else if (arg1->type == STACK_R4)
13067 cmp->opcode = OP_RCOMPARE;
13068 else if (arg1->type == STACK_R8)
13069 cmp->opcode = OP_FCOMPARE;
13071 cmp->opcode = OP_ICOMPARE;
13072 MONO_ADD_INS (cfg->cbb, cmp);
13073 ins->type = STACK_I4;
13074 ins->dreg = alloc_dreg (cfg, (MonoStackType)ins->type);
13075 type_from_op (cfg, ins, arg1, arg2);
13077 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
13079 * The backends expect the fceq opcodes to do the
13082 ins->sreg1 = cmp->sreg1;
13083 ins->sreg2 = cmp->sreg2;
13086 MONO_ADD_INS (cfg->cbb, ins);
13092 MonoInst *argconst;
13093 MonoMethod *cil_method;
13095 CHECK_STACK_OVF (1);
13097 n = read32 (ip + 2);
13098 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13101 mono_class_init (cmethod->klass);
13103 mono_save_token_info (cfg, image, n, cmethod);
13105 context_used = mini_method_check_context_used (cfg, cmethod);
13107 cil_method = cmethod;
13108 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
13109 emit_method_access_failure (cfg, method, cil_method);
13111 if (mono_security_core_clr_enabled ())
13112 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13115 * Optimize the common case of ldftn+delegate creation
13117 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
13118 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13119 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13120 MonoInst *target_ins, *handle_ins;
13121 MonoMethod *invoke;
13122 int invoke_context_used;
13124 invoke = mono_get_delegate_invoke (ctor_method->klass);
13125 if (!invoke || !mono_method_signature (invoke))
13128 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13130 target_ins = sp [-1];
13132 if (mono_security_core_clr_enabled ())
13133 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13135 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
13136 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
13137 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
13138 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
13139 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
13143 /* FIXME: SGEN support */
13144 if (invoke_context_used == 0 || cfg->llvm_only) {
13146 if (cfg->verbose_level > 3)
13147 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13148 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
13151 CHECK_CFG_EXCEPTION;
13161 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
13162 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
13166 inline_costs += 10 * num_calls++;
13169 case CEE_LDVIRTFTN: {
13170 MonoInst *args [2];
13174 n = read32 (ip + 2);
13175 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
13178 mono_class_init (cmethod->klass);
13180 context_used = mini_method_check_context_used (cfg, cmethod);
13182 if (mono_security_core_clr_enabled ())
13183 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
13186 * Optimize the common case of ldvirtftn+delegate creation
13188 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
13189 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
13190 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
13191 MonoInst *target_ins, *handle_ins;
13192 MonoMethod *invoke;
13193 int invoke_context_used;
13194 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
13196 invoke = mono_get_delegate_invoke (ctor_method->klass);
13197 if (!invoke || !mono_method_signature (invoke))
13200 invoke_context_used = mini_method_check_context_used (cfg, invoke);
13202 target_ins = sp [-1];
13204 if (mono_security_core_clr_enabled ())
13205 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
13207 /* FIXME: SGEN support */
13208 if (invoke_context_used == 0 || cfg->llvm_only) {
13210 if (cfg->verbose_level > 3)
13211 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
13212 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
13215 CHECK_CFG_EXCEPTION;
13228 args [1] = emit_get_rgctx_method (cfg, context_used,
13229 cmethod, MONO_RGCTX_INFO_METHOD);
13232 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
13234 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
13237 inline_costs += 10 * num_calls++;
13241 CHECK_STACK_OVF (1);
13243 n = read16 (ip + 2);
13245 EMIT_NEW_ARGLOAD (cfg, ins, n);
13250 CHECK_STACK_OVF (1);
13252 n = read16 (ip + 2);
13254 NEW_ARGLOADA (cfg, ins, n);
13255 MONO_ADD_INS (cfg->cbb, ins);
13263 n = read16 (ip + 2);
13265 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
13267 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
13271 CHECK_STACK_OVF (1);
13273 n = read16 (ip + 2);
13275 EMIT_NEW_LOCLOAD (cfg, ins, n);
13280 unsigned char *tmp_ip;
13281 CHECK_STACK_OVF (1);
13283 n = read16 (ip + 2);
13286 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
13292 EMIT_NEW_LOCLOADA (cfg, ins, n);
13301 n = read16 (ip + 2);
13303 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
13305 emit_stloc_ir (cfg, sp, header, n);
13309 case CEE_LOCALLOC: {
13311 MonoBasicBlock *non_zero_bb, *end_bb;
13312 int alloc_ptr = alloc_preg (cfg);
13314 if (sp != stack_start)
13316 if (cfg->method != method)
13318 * Inlining this into a loop in a parent could lead to
13319 * stack overflows which is different behavior than the
13320 * non-inlined case, thus disable inlining in this case.
13322 INLINE_FAILURE("localloc");
13324 NEW_BBLOCK (cfg, non_zero_bb);
13325 NEW_BBLOCK (cfg, end_bb);
13327 /* if size != zero */
13328 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
13329 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
13331 //size is zero, so result is NULL
13332 MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
13333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
13335 MONO_START_BB (cfg, non_zero_bb);
13336 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
13337 ins->dreg = alloc_ptr;
13338 ins->sreg1 = sp [0]->dreg;
13339 ins->type = STACK_PTR;
13340 MONO_ADD_INS (cfg->cbb, ins);
13342 cfg->flags |= MONO_CFG_HAS_ALLOCA;
13344 ins->flags |= MONO_INST_INIT;
13346 MONO_START_BB (cfg, end_bb);
13347 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
13348 ins->type = STACK_PTR;
13354 case CEE_ENDFILTER: {
13355 MonoExceptionClause *clause, *nearest;
13360 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
13362 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
13363 ins->sreg1 = (*sp)->dreg;
13364 MONO_ADD_INS (cfg->cbb, ins);
13365 start_new_bblock = 1;
13369 for (cc = 0; cc < header->num_clauses; ++cc) {
13370 clause = &header->clauses [cc];
13371 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
13372 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
13373 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
13376 g_assert (nearest);
13377 if ((ip - header->code) != nearest->handler_offset)
13382 case CEE_UNALIGNED_:
13383 ins_flag |= MONO_INST_UNALIGNED;
13384 /* FIXME: record alignment? we can assume 1 for now */
13388 case CEE_VOLATILE_:
13389 ins_flag |= MONO_INST_VOLATILE;
13393 ins_flag |= MONO_INST_TAILCALL;
13394 cfg->flags |= MONO_CFG_HAS_TAIL;
13395 /* Can't inline tail calls at this time */
13396 inline_costs += 100000;
13403 token = read32 (ip + 2);
13404 klass = mini_get_class (method, token, generic_context);
13405 CHECK_TYPELOAD (klass);
13406 if (generic_class_is_reference_type (cfg, klass))
13407 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
13409 mini_emit_initobj (cfg, *sp, NULL, klass);
13413 case CEE_CONSTRAINED_:
13415 token = read32 (ip + 2);
13416 constrained_class = mini_get_class (method, token, generic_context);
13417 CHECK_TYPELOAD (constrained_class);
13421 case CEE_INITBLK: {
13422 MonoInst *iargs [3];
13426 /* Skip optimized paths for volatile operations. */
13427 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
13428 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
13429 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
13430 /* emit_memset only works when val == 0 */
13431 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
13434 iargs [0] = sp [0];
13435 iargs [1] = sp [1];
13436 iargs [2] = sp [2];
13437 if (ip [1] == CEE_CPBLK) {
13439 * FIXME: It's unclear whether we should be emitting both the acquire
13440 * and release barriers for cpblk. It is technically both a load and
13441 * store operation, so it seems like that's the sensible thing to do.
13443 * FIXME: We emit full barriers on both sides of the operation for
13444 * simplicity. We should have a separate atomic memcpy method instead.
13446 MonoMethod *memcpy_method = get_memcpy_method ();
13448 if (ins_flag & MONO_INST_VOLATILE)
13449 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13451 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
13452 call->flags |= ins_flag;
13454 if (ins_flag & MONO_INST_VOLATILE)
13455 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
13457 MonoMethod *memset_method = get_memset_method ();
13458 if (ins_flag & MONO_INST_VOLATILE) {
13459 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
13460 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
13462 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
13463 call->flags |= ins_flag;
13474 ins_flag |= MONO_INST_NOTYPECHECK;
13476 ins_flag |= MONO_INST_NORANGECHECK;
13477 /* we ignore the no-nullcheck for now since we
13478 * really do it explicitly only when doing callvirt->call
13482 case CEE_RETHROW: {
13484 int handler_offset = -1;
13486 for (i = 0; i < header->num_clauses; ++i) {
13487 MonoExceptionClause *clause = &header->clauses [i];
13488 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
13489 handler_offset = clause->handler_offset;
13494 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
13496 if (handler_offset == -1)
13499 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
13500 MONO_INST_NEW (cfg, ins, OP_RETHROW);
13501 ins->sreg1 = load->dreg;
13502 MONO_ADD_INS (cfg->cbb, ins);
13504 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
13505 MONO_ADD_INS (cfg->cbb, ins);
13508 link_bblock (cfg, cfg->cbb, end_bblock);
13509 start_new_bblock = 1;
13517 CHECK_STACK_OVF (1);
13519 token = read32 (ip + 2);
13520 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13521 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13524 val = mono_type_size (type, &ialign);
13526 MonoClass *klass = mini_get_class (method, token, generic_context);
13527 CHECK_TYPELOAD (klass);
13529 val = mono_type_size (&klass->byval_arg, &ialign);
13531 if (mini_is_gsharedvt_klass (klass))
13532 GSHAREDVT_FAILURE (*ip);
13534 EMIT_NEW_ICONST (cfg, ins, val);
13539 case CEE_REFANYTYPE: {
13540 MonoInst *src_var, *src;
13542 GSHAREDVT_FAILURE (*ip);
13548 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13550 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13551 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13552 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13557 case CEE_READONLY_:
13570 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13580 g_warning ("opcode 0x%02x not handled", *ip);
13584 if (start_new_bblock != 1)
13587 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13588 if (cfg->cbb->next_bb) {
13589 /* This could already be set because of inlining, #693905 */
13590 MonoBasicBlock *bb = cfg->cbb;
13592 while (bb->next_bb)
13594 bb->next_bb = end_bblock;
13596 cfg->cbb->next_bb = end_bblock;
13599 if (cfg->method == method && cfg->domainvar) {
13601 MonoInst *get_domain;
13603 cfg->cbb = init_localsbb;
13605 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13606 MONO_ADD_INS (cfg->cbb, get_domain);
13608 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13610 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13611 MONO_ADD_INS (cfg->cbb, store);
13614 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13615 if (cfg->compile_aot)
13616 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13617 mono_get_got_var (cfg);
13620 if (cfg->method == method && cfg->got_var)
13621 mono_emit_load_got_addr (cfg);
13623 if (init_localsbb) {
13624 cfg->cbb = init_localsbb;
13626 for (i = 0; i < header->num_locals; ++i) {
13627 emit_init_local (cfg, i, header->locals [i], init_locals);
13631 if (cfg->init_ref_vars && cfg->method == method) {
13632 /* Emit initialization for ref vars */
13633 // FIXME: Avoid duplication initialization for IL locals.
13634 for (i = 0; i < cfg->num_varinfo; ++i) {
13635 MonoInst *ins = cfg->varinfo [i];
13637 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13638 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13642 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13643 cfg->cbb = init_localsbb;
13644 emit_push_lmf (cfg);
13647 cfg->cbb = init_localsbb;
13648 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13651 MonoBasicBlock *bb;
13654 * Make seq points at backward branch targets interruptable.
13656 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13657 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13658 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13661 /* Add a sequence point for method entry/exit events */
13662 if (seq_points && cfg->gen_sdb_seq_points) {
13663 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13664 MONO_ADD_INS (init_localsbb, ins);
13665 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13666 MONO_ADD_INS (cfg->bb_exit, ins);
13670 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13671 * the code they refer to was dead (#11880).
13673 if (sym_seq_points) {
13674 for (i = 0; i < header->code_size; ++i) {
13675 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13678 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13679 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13686 if (cfg->method == method) {
13687 MonoBasicBlock *bb;
13688 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13689 if (bb == cfg->bb_init)
13692 bb->region = mono_find_block_region (cfg, bb->real_offset);
13694 mono_create_spvar_for_region (cfg, bb->region);
13695 if (cfg->verbose_level > 2)
13696 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13699 MonoBasicBlock *bb;
13700 /* get_most_deep_clause () in mini-llvm.c depends on this for inlined bblocks */
13701 for (bb = start_bblock; bb != end_bblock; bb = bb->next_bb) {
13702 bb->real_offset = inline_offset;
13706 if (inline_costs < 0) {
13709 /* Method is too large */
13710 mname = mono_method_full_name (method, TRUE);
13711 mono_cfg_set_exception_invalid_program (cfg, g_strdup_printf ("Method %s is too complex.", mname));
13715 if ((cfg->verbose_level > 2) && (cfg->method == method))
13716 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13721 g_assert (!mono_error_ok (&cfg->error));
13725 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13729 set_exception_type_from_invalid_il (cfg, method, ip);
13733 g_slist_free (class_inits);
13734 mono_basic_block_free (original_bb);
13735 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13736 if (cfg->exception_type)
13739 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store-membase opcode to its immediate-source
 * counterpart (OP_STORE*_MEMBASE_REG -> OP_STORE*_MEMBASE_IMM), used when
 * the stored value is known to be a constant.  Aborts on opcodes with no
 * immediate form.
 */
13743 store_membase_reg_to_store_membase_imm (int opcode)
13746 case OP_STORE_MEMBASE_REG:
13747 return OP_STORE_MEMBASE_IMM;
13748 case OP_STOREI1_MEMBASE_REG:
13749 return OP_STOREI1_MEMBASE_IMM;
13750 case OP_STOREI2_MEMBASE_REG:
13751 return OP_STOREI2_MEMBASE_IMM;
13752 case OP_STOREI4_MEMBASE_REG:
13753 return OP_STOREI4_MEMBASE_IMM;
13754 case OP_STOREI8_MEMBASE_REG:
13755 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for this store opcode */
13757 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE (e.g. an integer ALU op
 * whose second source is a constant), so a separate constant-load can be
 * folded away.  NOTE(review): the fallback for opcodes with no immediate
 * form is not visible in this excerpt — presumably -1; confirm against the
 * full source.
 */
13764 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU and shift ops */
13768 return OP_IADD_IMM;
13770 return OP_ISUB_IMM;
13772 return OP_IDIV_IMM;
13774 return OP_IDIV_UN_IMM;
13776 return OP_IREM_IMM;
13778 return OP_IREM_UN_IMM;
13780 return OP_IMUL_IMM;
13782 return OP_IAND_IMM;
13786 return OP_IXOR_IMM;
13788 return OP_ISHL_IMM;
13790 return OP_ISHR_IMM;
13792 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU and shift ops */
13795 return OP_LADD_IMM;
13797 return OP_LSUB_IMM;
13799 return OP_LAND_IMM;
13803 return OP_LXOR_IMM;
13805 return OP_LSHL_IMM;
13807 return OP_LSHR_IMM;
13809 return OP_LSHR_UN_IMM;
/* Long remainder has an immediate form only on 64-bit register targets */
13810 #if SIZEOF_REGISTER == 8
13812 return OP_LREM_IMM;
/* Compares */
13816 return OP_COMPARE_IMM;
13818 return OP_ICOMPARE_IMM;
13820 return OP_LCOMPARE_IMM;
/* Stores of a constant value */
13822 case OP_STORE_MEMBASE_REG:
13823 return OP_STORE_MEMBASE_IMM;
13824 case OP_STOREI1_MEMBASE_REG:
13825 return OP_STOREI1_MEMBASE_IMM;
13826 case OP_STOREI2_MEMBASE_REG:
13827 return OP_STOREI2_MEMBASE_IMM;
13828 case OP_STOREI4_MEMBASE_REG:
13829 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific immediate forms */
13831 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13833 return OP_X86_PUSH_IMM;
13834 case OP_X86_COMPARE_MEMBASE_REG:
13835 return OP_X86_COMPARE_MEMBASE_IMM;
13837 #if defined(TARGET_AMD64)
13838 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13839 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant address becomes a direct call */
13841 case OP_VOIDCALL_REG:
13842 return OP_VOIDCALL;
/* localloc with a constant size */
13850 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding JIT
 * OP_LOAD*_MEMBASE opcode.  Aborts on anything that is not an ldind opcode.
 */
13857 ldind_to_load_membase (int opcode)
13861 return OP_LOADI1_MEMBASE;
13863 return OP_LOADU1_MEMBASE;
13865 return OP_LOADI2_MEMBASE;
13867 return OP_LOADU2_MEMBASE;
13869 return OP_LOADI4_MEMBASE;
13871 return OP_LOADU4_MEMBASE;
13873 return OP_LOAD_MEMBASE;
/* Object references load with the native-word-sized load */
13874 case CEE_LDIND_REF:
13875 return OP_LOAD_MEMBASE;
13877 return OP_LOADI8_MEMBASE;
13879 return OP_LOADR4_MEMBASE;
13881 return OP_LOADR8_MEMBASE;
/* Not an ldind opcode */
13883 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding JIT
 * OP_STORE*_MEMBASE_REG opcode.  Aborts on anything that is not a stind
 * opcode.
 */
13890 stind_to_store_membase (int opcode)
13894 return OP_STOREI1_MEMBASE_REG;
13896 return OP_STOREI2_MEMBASE_REG;
13898 return OP_STOREI4_MEMBASE_REG;
/* Object references store with the native-word-sized store */
13900 case CEE_STIND_REF:
13901 return OP_STORE_MEMBASE_REG;
13903 return OP_STOREI8_MEMBASE_REG;
13905 return OP_STORER4_MEMBASE_REG;
13907 return OP_STORER8_MEMBASE_REG;
/* Not a stind opcode */
13909 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address form
 * (OP_LOAD*_MEM), which only x86/amd64 provide.  NOTE(review): the
 * fall-through result for other targets/opcodes is not visible in this
 * excerpt — presumably -1; confirm against the full source.
 */
13916 mono_load_membase_to_load_mem (int opcode)
13918 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13919 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13921 case OP_LOAD_MEMBASE:
13922 return OP_LOAD_MEM;
13923 case OP_LOADU1_MEMBASE:
13924 return OP_LOADU1_MEM;
13925 case OP_LOADU2_MEMBASE:
13926 return OP_LOADU2_MEM;
13927 case OP_LOADI4_MEMBASE:
13928 return OP_LOADI4_MEM;
13929 case OP_LOADU4_MEMBASE:
13930 return OP_LOADU4_MEM;
/* 64-bit loads only have an absolute form on 64-bit registers */
13931 #if SIZEOF_REGISTER == 8
13932 case OP_LOADI8_MEMBASE:
13933 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given a store opcode and the ALU opcode producing the stored value,
 * return a read-modify-write _MEMBASE opcode that operates directly on the
 * memory destination (x86/amd64 only), so the load+op+store sequence can be
 * collapsed into one instruction.  NOTE(review): the -1 "no fusion"
 * fallback is not visible in this excerpt; confirm against the full source.
 */
13942 op_to_op_dest_membase (int store_opcode, int opcode)
13944 #if defined(TARGET_X86)
/* On x86 only word-sized (32-bit) stores can be fused */
13945 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13950 return OP_X86_ADD_MEMBASE_REG;
13952 return OP_X86_SUB_MEMBASE_REG;
13954 return OP_X86_AND_MEMBASE_REG;
13956 return OP_X86_OR_MEMBASE_REG;
13958 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source variants */
13961 return OP_X86_ADD_MEMBASE_IMM;
13964 return OP_X86_SUB_MEMBASE_IMM;
13967 return OP_X86_AND_MEMBASE_IMM;
13970 return OP_X86_OR_MEMBASE_IMM;
13973 return OP_X86_XOR_MEMBASE_IMM;
13979 #if defined(TARGET_AMD64)
/* amd64 can additionally fuse 64-bit stores */
13980 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
13985 return OP_X86_ADD_MEMBASE_REG;
13987 return OP_X86_SUB_MEMBASE_REG;
13989 return OP_X86_AND_MEMBASE_REG;
13991 return OP_X86_OR_MEMBASE_REG;
13993 return OP_X86_XOR_MEMBASE_REG;
13995 return OP_X86_ADD_MEMBASE_IMM;
13997 return OP_X86_SUB_MEMBASE_IMM;
13999 return OP_X86_AND_MEMBASE_IMM;
14001 return OP_X86_OR_MEMBASE_IMM;
14003 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
14005 return OP_AMD64_ADD_MEMBASE_REG;
14007 return OP_AMD64_SUB_MEMBASE_REG;
14009 return OP_AMD64_AND_MEMBASE_REG;
14011 return OP_AMD64_OR_MEMBASE_REG;
14013 return OP_AMD64_XOR_MEMBASE_REG;
14016 return OP_AMD64_ADD_MEMBASE_IMM;
14019 return OP_AMD64_SUB_MEMBASE_IMM;
14022 return OP_AMD64_AND_MEMBASE_IMM;
14025 return OP_AMD64_OR_MEMBASE_IMM;
14028 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the following byte store into a
 * single x86 SETcc-to-memory instruction.  Only applies when the store is
 * OP_STOREI1_MEMBASE_REG (SETcc writes one byte).  NOTE(review): the case
 * labels selecting between SETEQ/SETNE are elided in this excerpt —
 * presumably OP_ICEQ/OP_ICNEQ-style opcodes; confirm.
 */
14038 op_to_op_store_membase (int store_opcode, int opcode)
14040 #if defined(TARGET_X86) || defined(TARGET_AMD64)
14043 if (store_opcode == OP_STOREI1_MEMBASE_REG)
14044 return OP_X86_SETEQ_MEMBASE;
14046 if (store_opcode == OP_STOREI1_MEMBASE_REG)
14047 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load feeding sreg1 of OPCODE into the consuming
 * instruction itself (x86/amd64 reg-membase forms), eliminating the
 * separate load.  LOAD_OPCODE is the opcode of the load being folded.
 * NOTE(review): the -1 "cannot fold" fallback is not visible in this
 * excerpt; confirm against the full source.
 */
14055 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
14058 /* FIXME: This has sign extension issues */
/* Byte compare against an immediate can use the 8-bit membase form */
14060 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14061 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only word-sized loads can be folded on x86 */
14064 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14069 return OP_X86_PUSH_MEMBASE;
14070 case OP_COMPARE_IMM:
14071 case OP_ICOMPARE_IMM:
14072 return OP_X86_COMPARE_MEMBASE_IMM;
14075 return OP_X86_COMPARE_MEMBASE_REG;
14079 #ifdef TARGET_AMD64
14080 /* FIXME: This has sign extension issues */
14082 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
14083 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Pointer-sized push: OP_LOAD_MEMBASE is 64-bit only when not ILP32 */
14088 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14089 return OP_X86_PUSH_MEMBASE;
14091 /* FIXME: This only works for 32 bit immediates
14092 case OP_COMPARE_IMM:
14093 case OP_LCOMPARE_IMM:
14094 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
14095 return OP_AMD64_COMPARE_MEMBASE_IMM;
14097 case OP_ICOMPARE_IMM:
14098 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14099 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Under ILP32, pointer-sized loads are 32-bit, so use the icompare form */
14103 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
14104 return OP_AMD64_ICOMPARE_MEMBASE_REG;
14105 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
14106 return OP_AMD64_COMPARE_MEMBASE_REG;
14109 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
14110 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load feeding sreg2 of OPCODE into the consuming
 * instruction (x86/amd64 reg-membase ALU/compare forms).  LOAD_OPCODE is
 * the opcode of the load being folded.  NOTE(review): the -1 "cannot fold"
 * fallback is not visible in this excerpt; confirm against the full source.
 */
14119 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only word-sized loads can be folded */
14122 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
14128 return OP_X86_COMPARE_REG_MEMBASE;
14130 return OP_X86_ADD_REG_MEMBASE;
14132 return OP_X86_SUB_REG_MEMBASE;
14134 return OP_X86_AND_REG_MEMBASE;
14136 return OP_X86_OR_REG_MEMBASE;
14138 return OP_X86_XOR_REG_MEMBASE;
14142 #ifdef TARGET_AMD64
/* 32-bit loads (and pointer loads under ILP32) use the 32-bit forms */
14143 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
14146 return OP_AMD64_ICOMPARE_REG_MEMBASE;
14148 return OP_X86_ADD_REG_MEMBASE;
14150 return OP_X86_SUB_REG_MEMBASE;
14152 return OP_X86_AND_REG_MEMBASE;
14154 return OP_X86_OR_REG_MEMBASE;
14156 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit loads (and pointer loads when not ILP32) use the 64-bit forms */
14158 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
14162 return OP_AMD64_COMPARE_REG_MEMBASE;
14164 return OP_AMD64_ADD_REG_MEMBASE;
14166 return OP_AMD64_SUB_REG_MEMBASE;
14168 return OP_AMD64_AND_REG_MEMBASE;
14170 return OP_AMD64_OR_REG_MEMBASE;
14172 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to produce an immediate form for
 * opcodes the target emulates in software (the #if guards below select
 * long shifts on 32-bit registers and mul/div/rem under the EMULATE_*
 * macros; the elided case labels presumably return -1 — confirm against the
 * full source).  Everything else delegates to mono_op_to_op_imm ().
 */
14181 mono_op_to_op_imm_noemul (int opcode)
14184 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
14190 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
14197 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
14202 return mono_op_to_op_imm (opcode);
14207 * mono_handle_global_vregs:
14209 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
14213 mono_handle_global_vregs (MonoCompile *cfg)
14215 gint32 *vreg_to_bb;
14216 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg]: 0 = unseen, block_num + 1 = seen in exactly that
 * bblock, -1 = seen in more than one bblock.
 * NOTE(review): element size is sizeof (gint32*) (pointer size) for a
 * gint32 array — over-allocates on 64-bit, and the "+ 1" is outside the
 * multiplication; probably meant sizeof (gint32) * (cfg->next_vreg + 1).
 * Harmless but worth confirming/fixing upstream.
 */
14219 vreg_to_bb = (gint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
14221 #ifdef MONO_ARCH_SIMD_INTRINSICS
14222 if (cfg->uses_simd_intrinsics)
14223 mono_simd_simplify_indirection (cfg);
14226 /* Find local vregs used in more than one bb */
14227 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14228 MonoInst *ins = bb->code;
14229 int block_num = bb->block_num;
14231 if (cfg->verbose_level > 2)
14232 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
14235 for (; ins; ins = ins->next) {
14236 const char *spec = INS_INFO (ins->opcode);
14237 int regtype = 0, regindex;
14240 if (G_UNLIKELY (cfg->verbose_level > 2))
14241 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR */
14243 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk the up-to-4 registers of the instruction: dest, src1, src2, src3 */
14245 for (regindex = 0; regindex < 4; regindex ++) {
14248 if (regindex == 0) {
14249 regtype = spec [MONO_INST_DEST];
14250 if (regtype == ' ')
14253 } else if (regindex == 1) {
14254 regtype = spec [MONO_INST_SRC1];
14255 if (regtype == ' ')
14258 } else if (regindex == 2) {
14259 regtype = spec [MONO_INST_SRC2];
14260 if (regtype == ' ')
14263 } else if (regindex == 3) {
14264 regtype = spec [MONO_INST_SRC3];
14265 if (regtype == ' ')
14270 #if SIZEOF_REGISTER == 4
14271 /* In the LLVM case, the long opcodes are not decomposed */
14272 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
14274 * Since some instructions reference the original long vreg,
14275 * and some reference the two component vregs, it is quite hard
14276 * to determine when it needs to be global. So be conservative.
14278 if (!get_vreg_to_inst (cfg, vreg)) {
14279 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14281 if (cfg->verbose_level > 2)
14282 printf ("LONG VREG R%d made global.\n", vreg);
14286 * Make the component vregs volatile since the optimizations can
14287 * get confused otherwise.
14289 get_vreg_to_inst (cfg, MONO_LVREG_LS (vreg))->flags |= MONO_INST_VOLATILE;
14290 get_vreg_to_inst (cfg, MONO_LVREG_MS (vreg))->flags |= MONO_INST_VOLATILE;
14294 g_assert (vreg != -1);
14296 prev_bb = vreg_to_bb [vreg];
14297 if (prev_bb == 0) {
14298 /* 0 is a valid block num */
14299 vreg_to_bb [vreg] = block_num + 1;
14300 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are per-arch fixed and never need a variable */
14301 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
14304 if (!get_vreg_to_inst (cfg, vreg)) {
14305 if (G_UNLIKELY (cfg->verbose_level > 2))
14306 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the matching stack type for the vreg */
14310 if (vreg_is_ref (cfg, vreg))
14311 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
14313 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
14316 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
14319 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
14322 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
14325 g_assert_not_reached ();
14329 /* Flag as having been used in more than one bb */
14330 vreg_to_bb [vreg] = -1;
14336 /* If a variable is used in only one bblock, convert it into a local vreg */
14337 for (i = 0; i < cfg->num_varinfo; i++) {
14338 MonoInst *var = cfg->varinfo [i];
14339 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
14341 switch (var->type) {
14347 #if SIZEOF_REGISTER == 8
14350 #if !defined(TARGET_X86)
14351 /* Enabling this screws up the fp stack on x86 */
14354 if (mono_arch_is_soft_float ())
14358 if (var->type == STACK_VTYPE && cfg->gsharedvt && mini_is_gsharedvt_variable_type (var->inst_vtype))
14362 /* Arguments are implicitly global */
14363 /* Putting R4 vars into registers doesn't work currently */
14364 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
14365 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
14367 * Make that the variable's liveness interval doesn't contain a call, since
14368 * that would cause the lvreg to be spilled, making the whole optimization
14371 /* This is too slow for JIT compilation */
14373 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
14375 int def_index, call_index, ins_index;
14376 gboolean spilled = FALSE;
14381 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
14382 const char *spec = INS_INFO (ins->opcode);
14384 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
14385 def_index = ins_index;
/*
 * NOTE(review): both disjuncts below test SRC1/sreg1 — the second arm
 * almost certainly was meant to test SRC2/sreg2, so uses of the var as a
 * second source never trigger the "spilled" check.  Verify and fix
 * upstream.
 */
14387 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
14388 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
14389 if (call_index > def_index) {
14395 if (MONO_IS_CALL (ins))
14396 call_index = ins_index;
14406 if (G_UNLIKELY (cfg->verbose_level > 2))
14407 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead; only its local vreg remains in use */
14408 var->flags |= MONO_INST_IS_DEAD;
14409 cfg->vreg_to_inst [var->dreg] = NULL;
14416 * Compress the varinfo and vars tables so the liveness computation is faster and
14417 * takes up less space.
14420 for (i = 0; i < cfg->num_varinfo; ++i) {
14421 MonoInst *var = cfg->varinfo [i];
14422 if (pos < i && cfg->locals_start == i)
14423 cfg->locals_start = pos;
14424 if (!(var->flags & MONO_INST_IS_DEAD)) {
14426 cfg->varinfo [pos] = cfg->varinfo [i];
14427 cfg->varinfo [pos]->inst_c0 = pos;
14428 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
14429 cfg->vars [pos].idx = pos;
14430 #if SIZEOF_REGISTER == 4
14431 if (cfg->varinfo [pos]->type == STACK_I8) {
14432 /* Modify the two component vars too */
14435 var1 = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->varinfo [pos]->dreg));
14436 var1->inst_c0 = pos;
14437 var1 = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->varinfo [pos]->dreg));
14438 var1->inst_c0 = pos;
14445 cfg->num_varinfo = pos;
14446 if (cfg->locals_start > cfg->num_varinfo)
14447 cfg->locals_start = cfg->num_varinfo;
14451 * mono_allocate_gsharedvt_vars:
14453 * Allocate variables with gsharedvt types to entries in the MonoGSharedVtMethodRuntimeInfo.entries array.
14454 * Initialize cfg->gsharedvt_vreg_to_idx with the mapping between vregs and indexes.
14457 mono_allocate_gsharedvt_vars (MonoCompile *cfg)
/* Zeroed, so gsharedvt_vreg_to_idx [vreg] == 0 means "not a gsharedvt var" */
14461 cfg->gsharedvt_vreg_to_idx = (int *)mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14463 for (i = 0; i < cfg->num_varinfo; ++i) {
14464 MonoInst *ins = cfg->varinfo [i];
14467 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
/* Locals (i >= locals_start) get a runtime-info slot... */
14468 if (i >= cfg->locals_start) {
14470 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
/* Stored as idx + 1 so 0 stays the "no entry" sentinel */
14471 cfg->gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14472 ins->opcode = OP_GSHAREDVT_LOCAL;
14473 ins->inst_imm = idx;
/* ...arguments are marked -1: passed by reference at a frame offset */
14476 cfg->gsharedvt_vreg_to_idx [ins->dreg] = -1;
14477 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14484 * mono_spill_global_vars:
14486 * Generate spill code for variables which are not allocated to registers,
14487 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
14488 * code is generated which could be optimized by the local optimization passes.
14491 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
14493 MonoBasicBlock *bb;
14495 int orig_next_vreg;
14496 guint32 *vreg_to_lvreg;
14498 guint32 i, lvregs_len;
14499 gboolean dest_has_lvreg = FALSE;
14500 MonoStackType stacktypes [128];
14501 MonoInst **live_range_start, **live_range_end;
14502 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
14504 *need_local_opts = FALSE;
14506 memset (spec2, 0, sizeof (spec2));
14508 /* FIXME: Move this function to mini.c */
14509 stacktypes ['i'] = STACK_PTR;
14510 stacktypes ['l'] = STACK_I8;
14511 stacktypes ['f'] = STACK_R8;
14512 #ifdef MONO_ARCH_SIMD_INTRINSICS
14513 stacktypes ['x'] = STACK_VTYPE;
14516 #if SIZEOF_REGISTER == 4
14517 /* Create MonoInsts for longs */
14518 for (i = 0; i < cfg->num_varinfo; i++) {
14519 MonoInst *ins = cfg->varinfo [i];
14521 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
14522 switch (ins->type) {
14527 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
14530 g_assert (ins->opcode == OP_REGOFFSET);
14532 tree = get_vreg_to_inst (cfg, MONO_LVREG_LS (ins->dreg));
14534 tree->opcode = OP_REGOFFSET;
14535 tree->inst_basereg = ins->inst_basereg;
14536 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
14538 tree = get_vreg_to_inst (cfg, MONO_LVREG_MS (ins->dreg));
14540 tree->opcode = OP_REGOFFSET;
14541 tree->inst_basereg = ins->inst_basereg;
14542 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
14552 if (cfg->compute_gc_maps) {
14553 /* registers need liveness info even for !non refs */
14554 for (i = 0; i < cfg->num_varinfo; i++) {
14555 MonoInst *ins = cfg->varinfo [i];
14557 if (ins->opcode == OP_REGVAR)
14558 ins->flags |= MONO_INST_GC_TRACK;
14562 /* FIXME: widening and truncation */
14565 * As an optimization, when a variable allocated to the stack is first loaded into
14566 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14567 * the variable again.
14569 orig_next_vreg = cfg->next_vreg;
14570 vreg_to_lvreg = (guint32 *)mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14571 lvregs = (guint32 *)mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14575 * These arrays contain the first and last instructions accessing a given
14577 * Since we emit bblocks in the same order we process them here, and we
14578 * don't split live ranges, these will precisely describe the live range of
14579 * the variable, i.e. the instruction range where a valid value can be found
14580 * in the variables location.
14581 * The live range is computed using the liveness info computed by the liveness pass.
14582 * We can't use vmv->range, since that is an abstract live range, and we need
14583 * one which is instruction precise.
14584 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14586 /* FIXME: Only do this if debugging info is requested */
14587 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14588 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14589 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14590 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14592 /* Add spill loads/stores */
14593 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14596 if (cfg->verbose_level > 2)
14597 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14599 /* Clear vreg_to_lvreg array */
14600 for (i = 0; i < lvregs_len; i++)
14601 vreg_to_lvreg [lvregs [i]] = 0;
14605 MONO_BB_FOR_EACH_INS (bb, ins) {
14606 const char *spec = INS_INFO (ins->opcode);
14607 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14608 gboolean store, no_lvreg;
14609 int sregs [MONO_MAX_SRC_REGS];
14611 if (G_UNLIKELY (cfg->verbose_level > 2))
14612 mono_print_ins (ins);
14614 if (ins->opcode == OP_NOP)
14618 * We handle LDADDR here as well, since it can only be decomposed
14619 * when variable addresses are known.
14621 if (ins->opcode == OP_LDADDR) {
14622 MonoInst *var = (MonoInst *)ins->inst_p0;
14624 if (var->opcode == OP_VTARG_ADDR) {
14625 /* Happens on SPARC/S390 where vtypes are passed by reference */
14626 MonoInst *vtaddr = var->inst_left;
14627 if (vtaddr->opcode == OP_REGVAR) {
14628 ins->opcode = OP_MOVE;
14629 ins->sreg1 = vtaddr->dreg;
14631 else if (var->inst_left->opcode == OP_REGOFFSET) {
14632 ins->opcode = OP_LOAD_MEMBASE;
14633 ins->inst_basereg = vtaddr->inst_basereg;
14634 ins->inst_offset = vtaddr->inst_offset;
14637 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg] < 0) {
14638 /* gsharedvt arg passed by ref */
14639 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14641 ins->opcode = OP_LOAD_MEMBASE;
14642 ins->inst_basereg = var->inst_basereg;
14643 ins->inst_offset = var->inst_offset;
14644 } else if (cfg->gsharedvt && cfg->gsharedvt_vreg_to_idx [var->dreg]) {
14645 MonoInst *load, *load2, *load3;
14646 int idx = cfg->gsharedvt_vreg_to_idx [var->dreg] - 1;
14647 int reg1, reg2, reg3;
14648 MonoInst *info_var = cfg->gsharedvt_info_var;
14649 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14653 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14656 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14658 g_assert (info_var);
14659 g_assert (locals_var);
14661 /* Mark the instruction used to compute the locals var as used */
14662 cfg->gsharedvt_locals_var_ins = NULL;
14664 /* Load the offset */
14665 if (info_var->opcode == OP_REGOFFSET) {
14666 reg1 = alloc_ireg (cfg);
14667 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14668 } else if (info_var->opcode == OP_REGVAR) {
14670 reg1 = info_var->dreg;
14672 g_assert_not_reached ();
14674 reg2 = alloc_ireg (cfg);
14675 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14676 /* Load the locals area address */
14677 reg3 = alloc_ireg (cfg);
14678 if (locals_var->opcode == OP_REGOFFSET) {
14679 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14680 } else if (locals_var->opcode == OP_REGVAR) {
14681 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14683 g_assert_not_reached ();
14685 /* Compute the address */
14686 ins->opcode = OP_PADD;
14690 mono_bblock_insert_before_ins (bb, ins, load3);
14691 mono_bblock_insert_before_ins (bb, load3, load2);
14693 mono_bblock_insert_before_ins (bb, load2, load);
14695 g_assert (var->opcode == OP_REGOFFSET);
14697 ins->opcode = OP_ADD_IMM;
14698 ins->sreg1 = var->inst_basereg;
14699 ins->inst_imm = var->inst_offset;
14702 *need_local_opts = TRUE;
14703 spec = INS_INFO (ins->opcode);
14706 if (ins->opcode < MONO_CEE_LAST) {
14707 mono_print_ins (ins);
14708 g_assert_not_reached ();
14712 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14716 if (MONO_IS_STORE_MEMBASE (ins)) {
14717 tmp_reg = ins->dreg;
14718 ins->dreg = ins->sreg2;
14719 ins->sreg2 = tmp_reg;
14722 spec2 [MONO_INST_DEST] = ' ';
14723 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14724 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14725 spec2 [MONO_INST_SRC3] = ' ';
14727 } else if (MONO_IS_STORE_MEMINDEX (ins))
14728 g_assert_not_reached ();
14733 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14734 printf ("\t %.3s %d", spec, ins->dreg);
14735 num_sregs = mono_inst_get_src_registers (ins, sregs);
14736 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14737 printf (" %d", sregs [srcindex]);
14744 regtype = spec [MONO_INST_DEST];
14745 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14748 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14749 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14750 MonoInst *store_ins;
14752 MonoInst *def_ins = ins;
14753 int dreg = ins->dreg; /* The original vreg */
14755 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14757 if (var->opcode == OP_REGVAR) {
14758 ins->dreg = var->dreg;
14759 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14761 * Instead of emitting a load+store, use a _membase opcode.
14763 g_assert (var->opcode == OP_REGOFFSET);
14764 if (ins->opcode == OP_MOVE) {
14768 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14769 ins->inst_basereg = var->inst_basereg;
14770 ins->inst_offset = var->inst_offset;
14773 spec = INS_INFO (ins->opcode);
14777 g_assert (var->opcode == OP_REGOFFSET);
14779 prev_dreg = ins->dreg;
14781 /* Invalidate any previous lvreg for this vreg */
14782 vreg_to_lvreg [ins->dreg] = 0;
14786 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14788 store_opcode = OP_STOREI8_MEMBASE_REG;
14791 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14793 #if SIZEOF_REGISTER != 8
14794 if (regtype == 'l') {
14795 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, MONO_LVREG_LS (ins->dreg));
14796 mono_bblock_insert_after_ins (bb, ins, store_ins);
14797 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, MONO_LVREG_MS (ins->dreg));
14798 mono_bblock_insert_after_ins (bb, ins, store_ins);
14799 def_ins = store_ins;
14804 g_assert (store_opcode != OP_STOREV_MEMBASE);
14806 /* Try to fuse the store into the instruction itself */
14807 /* FIXME: Add more instructions */
14808 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14809 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14810 ins->inst_imm = ins->inst_c0;
14811 ins->inst_destbasereg = var->inst_basereg;
14812 ins->inst_offset = var->inst_offset;
14813 spec = INS_INFO (ins->opcode);
14814 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14815 ins->opcode = store_opcode;
14816 ins->inst_destbasereg = var->inst_basereg;
14817 ins->inst_offset = var->inst_offset;
14821 tmp_reg = ins->dreg;
14822 ins->dreg = ins->sreg2;
14823 ins->sreg2 = tmp_reg;
14826 spec2 [MONO_INST_DEST] = ' ';
14827 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14828 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14829 spec2 [MONO_INST_SRC3] = ' ';
14831 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14832 // FIXME: The backends expect the base reg to be in inst_basereg
14833 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14835 ins->inst_basereg = var->inst_basereg;
14836 ins->inst_offset = var->inst_offset;
14837 spec = INS_INFO (ins->opcode);
14839 /* printf ("INS: "); mono_print_ins (ins); */
14840 /* Create a store instruction */
14841 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14843 /* Insert it after the instruction */
14844 mono_bblock_insert_after_ins (bb, ins, store_ins);
14846 def_ins = store_ins;
14849 * We can't assign ins->dreg to var->dreg here, since the
14850 * sregs could use it. So set a flag, and do it after
14853 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14854 dest_has_lvreg = TRUE;
14859 if (def_ins && !live_range_start [dreg]) {
14860 live_range_start [dreg] = def_ins;
14861 live_range_start_bb [dreg] = bb;
14864 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14867 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14868 tmp->inst_c1 = dreg;
14869 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14876 num_sregs = mono_inst_get_src_registers (ins, sregs);
14877 for (srcindex = 0; srcindex < 3; ++srcindex) {
14878 regtype = spec [MONO_INST_SRC1 + srcindex];
14879 sreg = sregs [srcindex];
14881 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14882 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14883 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14884 MonoInst *use_ins = ins;
14885 MonoInst *load_ins;
14886 guint32 load_opcode;
14888 if (var->opcode == OP_REGVAR) {
14889 sregs [srcindex] = var->dreg;
14890 //mono_inst_set_src_registers (ins, sregs);
14891 live_range_end [sreg] = use_ins;
14892 live_range_end_bb [sreg] = bb;
14894 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14897 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14898 /* var->dreg is a hreg */
14899 tmp->inst_c1 = sreg;
14900 mono_bblock_insert_after_ins (bb, ins, tmp);
14906 g_assert (var->opcode == OP_REGOFFSET);
14908 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14910 g_assert (load_opcode != OP_LOADV_MEMBASE);
14912 if (vreg_to_lvreg [sreg]) {
14913 g_assert (vreg_to_lvreg [sreg] != -1);
14915 /* The variable is already loaded to an lvreg */
14916 if (G_UNLIKELY (cfg->verbose_level > 2))
14917 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14918 sregs [srcindex] = vreg_to_lvreg [sreg];
14919 //mono_inst_set_src_registers (ins, sregs);
14923 /* Try to fuse the load into the instruction */
14924 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14925 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14926 sregs [0] = var->inst_basereg;
14927 //mono_inst_set_src_registers (ins, sregs);
14928 ins->inst_offset = var->inst_offset;
14929 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14930 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14931 sregs [1] = var->inst_basereg;
14932 //mono_inst_set_src_registers (ins, sregs);
14933 ins->inst_offset = var->inst_offset;
14935 if (MONO_IS_REAL_MOVE (ins)) {
14936 ins->opcode = OP_NOP;
14939 //printf ("%d ", srcindex); mono_print_ins (ins);
14941 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14943 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14944 if (var->dreg == prev_dreg) {
14946 * sreg refers to the value loaded by the load
14947 * emitted below, but we need to use ins->dreg
14948 * since it refers to the store emitted earlier.
14952 g_assert (sreg != -1);
14953 vreg_to_lvreg [var->dreg] = sreg;
14954 g_assert (lvregs_len < 1024);
14955 lvregs [lvregs_len ++] = var->dreg;
14959 sregs [srcindex] = sreg;
14960 //mono_inst_set_src_registers (ins, sregs);
14962 #if SIZEOF_REGISTER != 8
14963 if (regtype == 'l') {
14964 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_MS (sreg), var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14965 mono_bblock_insert_before_ins (bb, ins, load_ins);
14966 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, MONO_LVREG_LS (sreg), var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14967 mono_bblock_insert_before_ins (bb, ins, load_ins);
14968 use_ins = load_ins;
14973 #if SIZEOF_REGISTER == 4
14974 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14976 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14977 mono_bblock_insert_before_ins (bb, ins, load_ins);
14978 use_ins = load_ins;
14982 if (var->dreg < orig_next_vreg) {
14983 live_range_end [var->dreg] = use_ins;
14984 live_range_end_bb [var->dreg] = bb;
14987 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14990 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14991 tmp->inst_c1 = var->dreg;
14992 mono_bblock_insert_after_ins (bb, ins, tmp);
14996 mono_inst_set_src_registers (ins, sregs);
14998 if (dest_has_lvreg) {
14999 g_assert (ins->dreg != -1);
15000 vreg_to_lvreg [prev_dreg] = ins->dreg;
15001 g_assert (lvregs_len < 1024);
15002 lvregs [lvregs_len ++] = prev_dreg;
15003 dest_has_lvreg = FALSE;
15007 tmp_reg = ins->dreg;
15008 ins->dreg = ins->sreg2;
15009 ins->sreg2 = tmp_reg;
15012 if (MONO_IS_CALL (ins)) {
15013 /* Clear vreg_to_lvreg array */
15014 for (i = 0; i < lvregs_len; i++)
15015 vreg_to_lvreg [lvregs [i]] = 0;
15017 } else if (ins->opcode == OP_NOP) {
15019 MONO_INST_NULLIFY_SREGS (ins);
15022 if (cfg->verbose_level > 2)
15023 mono_print_ins_index (1, ins);
15026 /* Extend the live range based on the liveness info */
15027 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
15028 for (i = 0; i < cfg->num_varinfo; i ++) {
15029 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
15031 if (vreg_is_volatile (cfg, vi->vreg))
15032 /* The liveness info is incomplete */
15035 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
15036 /* Live from at least the first ins of this bb */
15037 live_range_start [vi->vreg] = bb->code;
15038 live_range_start_bb [vi->vreg] = bb;
15041 if (mono_bitset_test_fast (bb->live_out_set, i)) {
15042 /* Live at least until the last ins of this bb */
15043 live_range_end [vi->vreg] = bb->last_ins;
15044 live_range_end_bb [vi->vreg] = bb;
15051 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
15052 * by storing the current native offset into MonoMethodVar->live_range_start/end.
15054 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
15055 for (i = 0; i < cfg->num_varinfo; ++i) {
15056 int vreg = MONO_VARINFO (cfg, i)->vreg;
15059 if (live_range_start [vreg]) {
15060 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
15062 ins->inst_c1 = vreg;
15063 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
15065 if (live_range_end [vreg]) {
15066 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
15068 ins->inst_c1 = vreg;
15069 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
15070 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
15072 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
15077 if (cfg->gsharedvt_locals_var_ins) {
15078 /* Nullify if unused */
15079 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
15080 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
15083 g_free (live_range_start);
15084 g_free (live_range_end);
15085 g_free (live_range_start_bb);
15086 g_free (live_range_end_bb);
/*
 * mono_decompose_typecheck:
 *
 *   Lower a single OP_ISINST/OP_CASTCLASS instruction INS found in BB into a
 * lower-level IR sequence (built in one or more new bblocks) that computes the
 * type-check result, then splice that sequence into BB in place of INS via
 * mono_replace_ins ().
 *
 * NOTE(review): several lines of this function are not visible in this view of
 * the file (elided); the comments below describe only the visible code.
 */
15090 mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
15092 MonoInst *ret, *move, *source;
15093 MonoClass *klass = ins->klass;
/* Whether KLASS needs runtime generic context information in shared code. */
15094 int context_used = mini_class_check_context_used (cfg, klass);
15095 int is_isinst = ins->opcode == OP_ISINST;
/* Only these two opcodes are handled here. */
15096 g_assert (is_isinst || ins->opcode == OP_CASTCLASS);
/* Find the variable backing the object operand; materialize one if sreg1 has none yet. */
15097 source = get_vreg_to_inst (cfg, ins->sreg1);
15098 if (!source || source == (MonoInst *) -1)
15099 source = mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, ins->sreg1);
15100 g_assert (source && source != (MonoInst *) -1);
/* Emit the replacement IR into a fresh bblock chain starting at first_bb. */
15102 MonoBasicBlock *first_bb;
15103 NEW_BBLOCK (cfg, first_bb);
15104 cfg->cbb = first_bb;
/*
 * Non-shared code for classes with a reference variant generic argument:
 * use the cached isinst/castclass helpers.
 */
15106 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
15108 ret = emit_isinst_with_cache_nonshared (cfg, source, klass);
15110 ret = emit_castclass_with_cache_nonshared (cfg, source, klass);
/*
 * Non-shared MarshalByRef or interface classes: inline the corresponding
 * marshal wrapper instead of emitting the check inline.
 */
15111 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
15112 MonoInst *iargs [1];
15115 iargs [0] = source;
15117 MonoMethod *wrapper = mono_marshal_get_isinst (klass);
15118 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15120 MonoMethod *wrapper = mono_marshal_get_castclass (klass);
/* Record cast details (for TypeLoadException-style diagnostics) around the inlined castclass. */
15121 save_cast_details (cfg, klass, source->dreg, TRUE);
15122 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper), iargs, 0, 0, TRUE);
15123 reset_cast_details (cfg);
/* inline_method () must have succeeded for the wrapper. */
15125 g_assert (costs > 0);
/* General case: emit the shared/non-shared check inline. */
15129 ret = handle_isinst (cfg, klass, source, context_used);
15131 ret = handle_castclass (cfg, klass, source, context_used);
/* Move the computed result into the original instruction's dreg. */
15133 EMIT_NEW_UNALU (cfg, move, OP_MOVE, ins->dreg, ret->dreg);
/* The replacement sequence must be non-empty. */
15135 g_assert (cfg->cbb->code || first_bb->code);
/* Splice [first_bb .. cfg->cbb] into BB, replacing INS. */
15136 MonoInst *prev = ins->prev;
15137 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/*
 * mono_decompose_typechecks:
 *
 *   Walk every bblock and every instruction of CFG and decompose each
 * type-check instruction (dispatched on ins->opcode by the switch below)
 * by calling mono_decompose_typecheck ().
 * NOTE(review): the switch's case labels and closing braces are elided in
 * this view of the file.
 */
15141 mono_decompose_typechecks (MonoCompile *cfg)
15143 for (MonoBasicBlock *bb = cfg->bb_entry; bb; bb = bb->next_bb) {
15145 MONO_BB_FOR_EACH_INS (bb, ins) {
15146 switch (ins->opcode) {
15149 mono_decompose_typecheck (cfg, bb, ins);
15159 * - use 'iadd' instead of 'int_add'
15160 * - handling ovf opcodes: decompose in method_to_ir.
15161 * - unify iregs/fregs
15162 * -> partly done, the missing parts are:
15163 * - a more complete unification would involve unifying the hregs as well, so
15164 * code wouldn't need if (fp) all over the place. but that would mean the hregs
15165 * would no longer map to the machine hregs, so the code generators would need to
15166 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
15167 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
15168 * fp/non-fp branches speeds it up by about 15%.
15169 * - use sext/zext opcodes instead of shifts
15171 * - get rid of TEMPLOADs if possible and use vregs instead
15172 * - clean up usage of OP_P/OP_ opcodes
15173 * - cleanup usage of DUMMY_USE
15174 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
15176 * - set the stack type and allocate a dreg in the EMIT_NEW macros
15177 * - get rid of all the <foo>2 stuff when the new JIT is ready.
15178 * - make sure handle_stack_args () is called before the branch is emitted
15179 * - when the new IR is done, get rid of all unused stuff
15180 * - COMPARE/BEQ as separate instructions or unify them ?
15181 * - keeping them separate allows specialized compare instructions like
15182 * compare_imm, compare_membase
15183 * - most back ends unify fp compare+branch, fp compare+ceq
15184 * - integrate mono_save_args into inline_method
15185 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
15186 * - handle long shift opts on 32 bit platforms somehow: they require
15187 * 3 sregs (2 for arg1 and 1 for arg2)
15188 * - make byref a 'normal' type.
15189 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
15190 * variable if needed.
15191 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
15192 * like inline_method.
15193 * - remove inlining restrictions
15194 * - fix LNEG and enable cfold of INEG
15195 * - generalize x86 optimizations like ldelema as a peephole optimization
15196 * - add store_mem_imm for amd64
15197 * - optimize the loading of the interruption flag in the managed->native wrappers
15198 * - avoid special handling of OP_NOP in passes
15199 * - move code inserting instructions into one function/macro.
15200 * - try a coalescing phase after liveness analysis
15201 * - add float -> vreg conversion + local optimizations on !x86
15202 * - figure out how to handle decomposed branches during optimizations, ie.
15203 * compare+branch, op_jump_table+op_br etc.
15204 * - promote RuntimeXHandles to vregs
15205 * - vtype cleanups:
15206 * - add a NEW_VARLOADA_VREG macro
15207 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
15208 * accessing vtype fields.
15209 * - get rid of I8CONST on 64 bit platforms
15210 * - dealing with the increase in code size due to branches created during opcode
15212 * - use extended basic blocks
15213 * - all parts of the JIT
15214 * - handle_global_vregs () && local regalloc
15215 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
15216 * - sources of increase in code size:
15219 * - isinst and castclass
15220 * - lvregs not allocated to global registers even if used multiple times
15221 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
15223 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
15224 * - add all micro optimizations from the old JIT
15225 * - put tree optimizations into the deadce pass
15226 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
15227 * specific function.
15228 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
15229 * fcompare + branchCC.
15230 * - create a helper function for allocating a stack slot, taking into account
15231 * MONO_CFG_HAS_SPILLUP.
15233 * - merge the ia64 switch changes.
15234 * - optimize mono_regstate2_alloc_int/float.
15235 * - fix the pessimistic handling of variables accessed in exception handler blocks.
15236 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
15237 * parts of the tree could be separated by other instructions, killing the tree
15238 * arguments, or stores killing loads etc. Also, should we fold loads into other
15239 * instructions if the result of the load is used multiple times ?
15240 * - make the REM_IMM optimization in mini-x86.c arch-independent.
15241 * - LAST MERGE: 108395.
15242 * - when returning vtypes in registers, generate IR and append it to the end of the
15243 * last bb instead of doing it in the epilog.
15244 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
15252 - When to decompose opcodes:
15253 - earlier: this makes some optimizations hard to implement, since the low level IR
15254 no longer contains the necessary information. But it is easier to do.
15255 - later: harder to implement, enables more optimizations.
15256 - Branches inside bblocks:
15257 - created when decomposing complex opcodes.
15258 - branches to another bblock: harmless, but not tracked by the branch
15259 optimizations, so need to branch to a label at the start of the bblock.
15260 - branches to inside the same bblock: very problematic, trips up the local
15261 reg allocator. Can be fixed by splitting the current bblock, but that is a
15262 complex operation, since some local vregs can become global vregs etc.
15263 - Local/global vregs:
15264 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
15265 local register allocator.
15266 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
15267 structure, created by mono_create_var (). Assigned to hregs or the stack by
15268 the global register allocator.
15269 - When to do optimizations like alu->alu_imm:
15270 - earlier -> saves work later on since the IR will be smaller/simpler
15271 - later -> can work on more instructions
15272 - Handling of valuetypes:
15273 - When a vtype is pushed on the stack, a new temporary is created, an
15274 instruction computing its address (LDADDR) is emitted and pushed on
15275 the stack. Need to optimize cases when the vtype is used immediately as in
15276 argument passing, stloc etc.
15277 - Instead of the to_end stuff in the old JIT, simply call the function handling
15278 the values on the stack before emitting the last instruction of the bb.
15281 #endif /* DISABLE_JIT */