/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internals.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
72 #include "aot-compiler.h"
/* Cost heuristics used by the inliner: branches are expensive, and only
 * methods whose IL is at most INLINE_LENGTH_LIMIT bytes are candidates. */
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20

/* These have 'cfg' as an implicit argument */
/* Record an inline failure and abort compilation of the inlined body. */
#define INLINE_FAILURE(msg) do { \
		if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
			inline_failure (cfg, msg); \
			goto exception_exit; \
		} \
	} while (0)

/* Bail out of the IL-to-IR loop if a previous helper already flagged an exception on cfg. */
#define CHECK_CFG_EXCEPTION do {\
		if (cfg->exception_type != MONO_EXCEPTION_NONE) \
			goto exception_exit; \
	} while (0)

#define METHOD_ACCESS_FAILURE(method, cmethod) do { \
		method_access_failure ((cfg), (method), (cmethod)); \
		goto exception_exit; \
	} while (0)

#define FIELD_ACCESS_FAILURE(method, field) do { \
		field_access_failure ((cfg), (method), (field)); \
		goto exception_exit; \
	} while (0)

/* Generic sharing of this opcode is not supported: fall back to a full instantiation. */
#define GENERIC_SHARING_FAILURE(opcode) do { \
		if (cfg->gshared) { \
			gshared_failure (cfg, opcode, __FILE__, __LINE__); \
			goto exception_exit; \
		} \
	} while (0)

#define GSHAREDVT_FAILURE(opcode) do { \
		if (cfg->gsharedvt) { \
			gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
			goto exception_exit; \
		} \
	} while (0)

#define OUT_OF_MEMORY_FAILURE do { \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
		goto exception_exit; \
	} while (0)

/* Force JIT compilation of the method even under AOT. */
#define DISABLE_AOT(cfg) do { \
		if ((cfg)->verbose_level >= 2) \
			printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
		(cfg)->disable_aot = TRUE; \
	} while (0)

#define LOAD_ERROR do { \
		break_on_unverified (); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
		goto exception_exit; \
	} while (0)

#define TYPE_LOAD_ERROR(klass) do { \
		cfg->exception_ptr = klass; \
		LOAD_ERROR; \
	} while (0)

#define CHECK_CFG_ERROR do {\
		if (!mono_error_ok (&cfg->error)) { \
			mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
			goto mono_error_exit; \
		} \
	} while (0)

/* Determine whenever 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
138 static int ldind_to_load_membase (int opcode);
139 static int stind_to_store_membase (int opcode);
141 int mono_op_to_op_imm (int opcode);
142 int mono_op_to_op_imm_noemul (int opcode);
144 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 /* helper methods signatures */
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 * Instruction metadata
162 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
163 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
169 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
174 /* keep in sync with the enum in mini.h */
177 #include "mini-ops.h"
182 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
183 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
185 * This should contain the index of the last sreg + 1. This is not the same
186 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
188 const gint8 ins_sreg_counts[] = {
189 #include "mini-ops.h"
194 #define MONO_INIT_VARINFO(vi,id) do { \
195 (vi)->range.first_use.pos.bid = 0xffff; \
201 mono_alloc_ireg (MonoCompile *cfg)
203 return alloc_ireg (cfg);
207 mono_alloc_lreg (MonoCompile *cfg)
209 return alloc_lreg (cfg);
213 mono_alloc_freg (MonoCompile *cfg)
215 return alloc_freg (cfg);
219 mono_alloc_preg (MonoCompile *cfg)
221 return alloc_preg (cfg);
225 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
227 return alloc_dreg (cfg, stack_type);
231 * mono_alloc_ireg_ref:
233 * Allocate an IREG, and mark it as holding a GC ref.
236 mono_alloc_ireg_ref (MonoCompile *cfg)
238 return alloc_ireg_ref (cfg);
242 * mono_alloc_ireg_mp:
244 * Allocate an IREG, and mark it as holding a managed pointer.
247 mono_alloc_ireg_mp (MonoCompile *cfg)
249 return alloc_ireg_mp (cfg);
253 * mono_alloc_ireg_copy:
255 * Allocate an IREG with the same GC type as VREG.
258 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
260 if (vreg_is_ref (cfg, vreg))
261 return alloc_ireg_ref (cfg);
262 else if (vreg_is_mp (cfg, vreg))
263 return alloc_ireg_mp (cfg);
265 return alloc_ireg (cfg);
269 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
274 type = mini_get_underlying_type (type);
276 switch (type->type) {
289 case MONO_TYPE_FNPTR:
291 case MONO_TYPE_CLASS:
292 case MONO_TYPE_STRING:
293 case MONO_TYPE_OBJECT:
294 case MONO_TYPE_SZARRAY:
295 case MONO_TYPE_ARRAY:
299 #if SIZEOF_REGISTER == 8
305 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
308 case MONO_TYPE_VALUETYPE:
309 if (type->data.klass->enumtype) {
310 type = mono_class_enum_basetype (type->data.klass);
313 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
316 case MONO_TYPE_TYPEDBYREF:
318 case MONO_TYPE_GENERICINST:
319 type = &type->data.generic_class->container_class->byval_arg;
323 g_assert (cfg->gshared);
324 if (mini_type_var_is_vt (type))
327 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
329 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
335 mono_print_bb (MonoBasicBlock *bb, const char *msg)
340 printf ("\n%s %d: [IN: ", msg, bb->block_num);
341 for (i = 0; i < bb->in_count; ++i)
342 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
344 for (i = 0; i < bb->out_count; ++i)
345 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
347 for (tree = bb->code; tree; tree = tree->next)
348 mono_print_ins_index (-1, tree);
352 mono_create_helper_signatures (void)
354 helper_sig_domain_get = mono_create_icall_signature ("ptr");
355 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
358 static MONO_NEVER_INLINE void
359 break_on_unverified (void)
361 if (mini_get_debug_options ()->break_on_unverified)
365 static MONO_NEVER_INLINE void
366 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
368 char *method_fname = mono_method_full_name (method, TRUE);
369 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
370 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
371 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
372 g_free (method_fname);
373 g_free (cil_method_fname);
376 static MONO_NEVER_INLINE void
377 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
379 char *method_fname = mono_method_full_name (method, TRUE);
380 char *field_fname = mono_field_full_name (field);
381 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
382 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
383 g_free (method_fname);
384 g_free (field_fname);
387 static MONO_NEVER_INLINE void
388 inline_failure (MonoCompile *cfg, const char *msg)
390 if (cfg->verbose_level >= 2)
391 printf ("inline failed: %s\n", msg);
392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
395 static MONO_NEVER_INLINE void
396 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
398 if (cfg->verbose_level > 2) \
399 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
400 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
403 static MONO_NEVER_INLINE void
404 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
406 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
407 if (cfg->verbose_level >= 2)
408 printf ("%s\n", cfg->exception_message);
409 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
 * foo<T> (int i) { ldarg.0; box T; }
 */
#define UNVERIFIED do { \
	if (cfg->gsharedvt) { \
		if (cfg->verbose_level > 2) \
			printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	} \
	break_on_unverified (); \
	goto unverified; \
} while (0)

/* Look up (or lazily create and register) the basic block starting at IL address IP. */
#define GET_BBLOCK(cfg,tblock,ip) do { \
		(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
		if (!(tblock)) { \
			if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
			NEW_BBLOCK (cfg, (tblock)); \
			(tblock)->cil_code = (ip); \
			ADD_BBLOCK (cfg, (tblock)); \
		} \
	} while (0)

#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm; result is a managed pointer. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
		MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
		(dest)->dreg = alloc_ireg_mp ((cfg)); \
		(dest)->sreg1 = (sr1); \
		(dest)->sreg2 = (sr2); \
		(dest)->inst_imm = (imm); \
		(dest)->backend.shift_amount = (shift); \
		MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif
449 /* Emit conversions so both operands of a binary opcode are of the same type */
451 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
453 MonoInst *arg1 = *arg1_ref;
454 MonoInst *arg2 = *arg2_ref;
457 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
458 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
461 /* Mixing r4/r8 is allowed by the spec */
462 if (arg1->type == STACK_R4) {
463 int dreg = alloc_freg (cfg);
465 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
466 conv->type = STACK_R8;
470 if (arg2->type == STACK_R4) {
471 int dreg = alloc_freg (cfg);
473 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
474 conv->type = STACK_R8;
480 #if SIZEOF_REGISTER == 8
481 /* FIXME: Need to add many more cases */
482 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
485 int dr = alloc_preg (cfg);
486 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
487 (ins)->sreg2 = widen->dreg;
/* Pop two values, emit a typed binary op, push the (possibly decomposed) result. */
#define ADD_BINOP(op) do { \
		MONO_INST_NEW (cfg, ins, (op)); \
		sp -= 2; \
		ins->sreg1 = sp [0]->dreg; \
		ins->sreg2 = sp [1]->dreg; \
		type_from_op (cfg, ins, sp [0], sp [1]); \
		CHECK_TYPE (ins); \
		/* Have to insert a widening op */ \
		add_widen_op (cfg, ins, &sp [0], &sp [1]); \
		ins->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode ((cfg), (ins)); \
	} while (0)

/* Pop one value, emit a typed unary op, push the result. */
#define ADD_UNOP(op) do { \
		MONO_INST_NEW (cfg, ins, (op)); \
		sp--; \
		ins->sreg1 = sp [0]->dreg; \
		type_from_op (cfg, ins, sp [0], NULL); \
		CHECK_TYPE (ins); \
		(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
		MONO_ADD_INS ((cfg)->cbb, (ins)); \
		*sp++ = mono_decompose_opcode (cfg, ins); \
	} while (0)

/* Emit a compare + conditional branch pair and wire up the true/false bblocks. */
#define ADD_BINCOND(next_block) do { \
		MonoInst *cmp; \
		sp -= 2; \
		MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
		cmp->sreg1 = sp [0]->dreg; \
		cmp->sreg2 = sp [1]->dreg; \
		type_from_op (cfg, cmp, sp [0], sp [1]); \
		CHECK_TYPE (cmp); \
		add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
		type_from_op (cfg, ins, sp [0], sp [1]); \
		ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
		GET_BBLOCK (cfg, tblock, target); \
		link_bblock (cfg, cfg->cbb, tblock); \
		ins->inst_true_bb = tblock; \
		if ((next_block)) { \
			link_bblock (cfg, cfg->cbb, (next_block)); \
			ins->inst_false_bb = (next_block); \
			start_new_bblock = 1; \
		} else { \
			GET_BBLOCK (cfg, tblock, ip); \
			link_bblock (cfg, cfg->cbb, tblock); \
			ins->inst_false_bb = tblock; \
			start_new_bblock = 2; \
		} \
		if (sp != stack_start) { \
			handle_stack_args (cfg, stack_start, sp - stack_start); \
			CHECK_UNVERIFIABLE (cfg); \
		} \
		MONO_ADD_INS (cfg->cbb, cmp); \
		MONO_ADD_INS (cfg->cbb, ins); \
	} while (0)
550 * link_bblock: Links two basic blocks
552 * links two basic blocks in the control flow graph, the 'from'
553 * argument is the starting block and the 'to' argument is the block
554 * the control flow ends to after 'from'.
557 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
559 MonoBasicBlock **newa;
563 if (from->cil_code) {
565 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
567 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
570 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
572 printf ("edge from entry to exit\n");
577 for (i = 0; i < from->out_count; ++i) {
578 if (to == from->out_bb [i]) {
584 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
585 for (i = 0; i < from->out_count; ++i) {
586 newa [i] = from->out_bb [i];
594 for (i = 0; i < to->in_count; ++i) {
595 if (from == to->in_bb [i]) {
601 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
602 for (i = 0; i < to->in_count; ++i) {
603 newa [i] = to->in_bb [i];
612 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
614 link_bblock (cfg, from, to);
618 * mono_find_block_region:
620 * We mark each basic block with a region ID. We use that to avoid BB
621 * optimizations when blocks are in different regions.
624 * A region token that encodes where this region is, and information
625 * about the clause owner for this block.
627 * The region encodes the try/catch/filter clause that owns this block
628 * as well as the type. -1 is a special value that represents a block
629 * that is in none of try/catch/filter.
632 mono_find_block_region (MonoCompile *cfg, int offset)
634 MonoMethodHeader *header = cfg->header;
635 MonoExceptionClause *clause;
638 for (i = 0; i < header->num_clauses; ++i) {
639 clause = &header->clauses [i];
640 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
641 (offset < (clause->handler_offset)))
642 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
644 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
645 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
646 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
647 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
648 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
650 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
653 for (i = 0; i < header->num_clauses; ++i) {
654 clause = &header->clauses [i];
656 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
657 return ((i + 1) << 8) | clause->flags;
664 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
666 MonoMethodHeader *header = cfg->header;
667 MonoExceptionClause *clause;
671 for (i = 0; i < header->num_clauses; ++i) {
672 clause = &header->clauses [i];
673 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
674 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
675 if (clause->flags == type)
676 res = g_list_append (res, clause);
683 mono_create_spvar_for_region (MonoCompile *cfg, int region)
687 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
691 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
692 /* prevent it from being register allocated */
693 var->flags |= MONO_INST_VOLATILE;
695 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
699 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
701 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
705 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
709 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
713 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
714 /* prevent it from being register allocated */
715 var->flags |= MONO_INST_VOLATILE;
717 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
723 * Returns the type used in the eval stack when @type is loaded.
724 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
727 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
731 type = mini_get_underlying_type (type);
732 inst->klass = klass = mono_class_from_mono_type (type);
734 inst->type = STACK_MP;
739 switch (type->type) {
741 inst->type = STACK_INV;
749 inst->type = STACK_I4;
754 case MONO_TYPE_FNPTR:
755 inst->type = STACK_PTR;
757 case MONO_TYPE_CLASS:
758 case MONO_TYPE_STRING:
759 case MONO_TYPE_OBJECT:
760 case MONO_TYPE_SZARRAY:
761 case MONO_TYPE_ARRAY:
762 inst->type = STACK_OBJ;
766 inst->type = STACK_I8;
769 inst->type = cfg->r4_stack_type;
772 inst->type = STACK_R8;
774 case MONO_TYPE_VALUETYPE:
775 if (type->data.klass->enumtype) {
776 type = mono_class_enum_basetype (type->data.klass);
780 inst->type = STACK_VTYPE;
783 case MONO_TYPE_TYPEDBYREF:
784 inst->klass = mono_defaults.typed_reference_class;
785 inst->type = STACK_VTYPE;
787 case MONO_TYPE_GENERICINST:
788 type = &type->data.generic_class->container_class->byval_arg;
792 g_assert (cfg->gshared);
793 if (mini_is_gsharedvt_type (type)) {
794 g_assert (cfg->gsharedvt);
795 inst->type = STACK_VTYPE;
797 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
801 g_error ("unknown type 0x%02x in eval stack type", type->type);
806 * The following tables are used to quickly validate the IL code in type_from_op ().
809 bin_num_table [STACK_MAX] [STACK_MAX] = {
810 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
815 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
816 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
818 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
823 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
826 /* reduce the size of this table */
828 bin_int_table [STACK_MAX] [STACK_MAX] = {
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
830 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
831 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
840 bin_comp_table [STACK_MAX] [STACK_MAX] = {
841 /* Inv i L p F & O vt r4 */
843 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
844 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
845 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
846 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
847 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
848 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
849 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
850 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
853 /* reduce the size of this table */
855 shift_table [STACK_MAX] [STACK_MAX] = {
856 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
857 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
858 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
859 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
860 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
861 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
867 * Tables to map from the non-specific opcode to the matching
868 * type-specific opcode.
870 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
872 binops_op_map [STACK_MAX] = {
873 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
876 /* handles from CEE_NEG to CEE_CONV_U8 */
878 unops_op_map [STACK_MAX] = {
879 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
882 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
884 ovfops_op_map [STACK_MAX] = {
885 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
888 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
890 ovf2ops_op_map [STACK_MAX] = {
891 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
894 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
896 ovf3ops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
900 /* handles from CEE_BEQ to CEE_BLT_UN */
902 beqops_op_map [STACK_MAX] = {
903 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
906 /* handles from CEE_CEQ to CEE_CLT_UN */
908 ceqops_op_map [STACK_MAX] = {
909 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
913 * Sets ins->type (the type on the eval stack) according to the
914 * type of the opcode and the arguments to it.
915 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
917 * FIXME: this function sets ins->type unconditionally in some cases, but
918 * it should set it to invalid for some types (a conv.x on an object)
921 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
923 switch (ins->opcode) {
930 /* FIXME: check unverifiable args for STACK_MP */
931 ins->type = bin_num_table [src1->type] [src2->type];
932 ins->opcode += binops_op_map [ins->type];
939 ins->type = bin_int_table [src1->type] [src2->type];
940 ins->opcode += binops_op_map [ins->type];
945 ins->type = shift_table [src1->type] [src2->type];
946 ins->opcode += binops_op_map [ins->type];
951 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
952 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
953 ins->opcode = OP_LCOMPARE;
954 else if (src1->type == STACK_R4)
955 ins->opcode = OP_RCOMPARE;
956 else if (src1->type == STACK_R8)
957 ins->opcode = OP_FCOMPARE;
959 ins->opcode = OP_ICOMPARE;
961 case OP_ICOMPARE_IMM:
962 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
963 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
964 ins->opcode = OP_LCOMPARE_IMM;
976 ins->opcode += beqops_op_map [src1->type];
979 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
980 ins->opcode += ceqops_op_map [src1->type];
986 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
987 ins->opcode += ceqops_op_map [src1->type];
991 ins->type = neg_table [src1->type];
992 ins->opcode += unops_op_map [ins->type];
995 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
996 ins->type = src1->type;
998 ins->type = STACK_INV;
999 ins->opcode += unops_op_map [ins->type];
1005 ins->type = STACK_I4;
1006 ins->opcode += unops_op_map [src1->type];
1009 ins->type = STACK_R8;
1010 switch (src1->type) {
1013 ins->opcode = OP_ICONV_TO_R_UN;
1016 ins->opcode = OP_LCONV_TO_R_UN;
1020 case CEE_CONV_OVF_I1:
1021 case CEE_CONV_OVF_U1:
1022 case CEE_CONV_OVF_I2:
1023 case CEE_CONV_OVF_U2:
1024 case CEE_CONV_OVF_I4:
1025 case CEE_CONV_OVF_U4:
1026 ins->type = STACK_I4;
1027 ins->opcode += ovf3ops_op_map [src1->type];
1029 case CEE_CONV_OVF_I_UN:
1030 case CEE_CONV_OVF_U_UN:
1031 ins->type = STACK_PTR;
1032 ins->opcode += ovf2ops_op_map [src1->type];
1034 case CEE_CONV_OVF_I1_UN:
1035 case CEE_CONV_OVF_I2_UN:
1036 case CEE_CONV_OVF_I4_UN:
1037 case CEE_CONV_OVF_U1_UN:
1038 case CEE_CONV_OVF_U2_UN:
1039 case CEE_CONV_OVF_U4_UN:
1040 ins->type = STACK_I4;
1041 ins->opcode += ovf2ops_op_map [src1->type];
1044 ins->type = STACK_PTR;
1045 switch (src1->type) {
1047 ins->opcode = OP_ICONV_TO_U;
1051 #if SIZEOF_VOID_P == 8
1052 ins->opcode = OP_LCONV_TO_U;
1054 ins->opcode = OP_MOVE;
1058 ins->opcode = OP_LCONV_TO_U;
1061 ins->opcode = OP_FCONV_TO_U;
1067 ins->type = STACK_I8;
1068 ins->opcode += unops_op_map [src1->type];
1070 case CEE_CONV_OVF_I8:
1071 case CEE_CONV_OVF_U8:
1072 ins->type = STACK_I8;
1073 ins->opcode += ovf3ops_op_map [src1->type];
1075 case CEE_CONV_OVF_U8_UN:
1076 case CEE_CONV_OVF_I8_UN:
1077 ins->type = STACK_I8;
1078 ins->opcode += ovf2ops_op_map [src1->type];
1081 ins->type = cfg->r4_stack_type;
1082 ins->opcode += unops_op_map [src1->type];
1085 ins->type = STACK_R8;
1086 ins->opcode += unops_op_map [src1->type];
1089 ins->type = STACK_R8;
1093 ins->type = STACK_I4;
1094 ins->opcode += ovfops_op_map [src1->type];
1097 case CEE_CONV_OVF_I:
1098 case CEE_CONV_OVF_U:
1099 ins->type = STACK_PTR;
1100 ins->opcode += ovfops_op_map [src1->type];
1103 case CEE_ADD_OVF_UN:
1105 case CEE_MUL_OVF_UN:
1107 case CEE_SUB_OVF_UN:
1108 ins->type = bin_num_table [src1->type] [src2->type];
1109 ins->opcode += ovfops_op_map [src1->type];
1110 if (ins->type == STACK_R8)
1111 ins->type = STACK_INV;
1113 case OP_LOAD_MEMBASE:
1114 ins->type = STACK_PTR;
1116 case OP_LOADI1_MEMBASE:
1117 case OP_LOADU1_MEMBASE:
1118 case OP_LOADI2_MEMBASE:
1119 case OP_LOADU2_MEMBASE:
1120 case OP_LOADI4_MEMBASE:
1121 case OP_LOADU4_MEMBASE:
1122 ins->type = STACK_PTR;
1124 case OP_LOADI8_MEMBASE:
1125 ins->type = STACK_I8;
1127 case OP_LOADR4_MEMBASE:
1128 ins->type = cfg->r4_stack_type;
1130 case OP_LOADR8_MEMBASE:
1131 ins->type = STACK_R8;
1134 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1138 if (ins->type == STACK_MP)
1139 ins->klass = mono_defaults.object_class;
1144 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1150 param_table [STACK_MAX] [STACK_MAX] = {
1155 check_values_to_signature (MonoInst *args, MonoType *this_ins, MonoMethodSignature *sig)
1160 switch (args->type) {
1170 for (i = 0; i < sig->param_count; ++i) {
1171 switch (args [i].type) {
1175 if (!sig->params [i]->byref)
1179 if (sig->params [i]->byref)
1181 switch (sig->params [i]->type) {
1182 case MONO_TYPE_CLASS:
1183 case MONO_TYPE_STRING:
1184 case MONO_TYPE_OBJECT:
1185 case MONO_TYPE_SZARRAY:
1186 case MONO_TYPE_ARRAY:
1193 if (sig->params [i]->byref)
1195 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1204 /*if (!param_table [args [i].type] [sig->params [i]->type])
1212 * When we need a pointer to the current domain many times in a method, we
1213 * call mono_domain_get() once and we store the result in a local variable.
1214 * This function returns the variable that represents the MonoDomain*.
1216 inline static MonoInst *
1217 mono_get_domainvar (MonoCompile *cfg)
1219 if (!cfg->domainvar)
1220 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1221 return cfg->domainvar;
1225 * The got_var contains the address of the Global Offset Table when AOT
1229 mono_get_got_var (MonoCompile *cfg)
1231 if (!cfg->compile_aot || !cfg->backend->need_got_var)
1233 if (!cfg->got_var) {
1234 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1236 return cfg->got_var;
1240 mono_get_vtable_var (MonoCompile *cfg)
1242 g_assert (cfg->gshared);
1244 if (!cfg->rgctx_var) {
1245 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1246 /* force the var to be stack allocated */
1247 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1250 return cfg->rgctx_var;
1254 type_from_stack_type (MonoInst *ins) {
1255 switch (ins->type) {
1256 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1257 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1258 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1259 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1260 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1262 return &ins->klass->this_arg;
1263 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1264 case STACK_VTYPE: return &ins->klass->byval_arg;
1266 g_error ("stack type %d to monotype not handled\n", ins->type);
1271 static G_GNUC_UNUSED int
1272 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1274 t = mono_type_get_underlying_type (t);
1286 case MONO_TYPE_FNPTR:
1288 case MONO_TYPE_CLASS:
1289 case MONO_TYPE_STRING:
1290 case MONO_TYPE_OBJECT:
1291 case MONO_TYPE_SZARRAY:
1292 case MONO_TYPE_ARRAY:
1298 return cfg->r4_stack_type;
1301 case MONO_TYPE_VALUETYPE:
1302 case MONO_TYPE_TYPEDBYREF:
1304 case MONO_TYPE_GENERICINST:
1305 if (mono_type_generic_inst_is_valuetype (t))
1311 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element class implied by a CIL array access opcode
 * (LDELEM_*/STELEM_* family). NOTE(review): most case labels are elided in
 * this view; each visible return corresponds to one element-size/kind group.
 */
1318 array_access_to_klass (int opcode)
1322 return mono_defaults.byte_class;
1324 return mono_defaults.uint16_class;
1327 return mono_defaults.int_class;
1330 return mono_defaults.sbyte_class;
1333 return mono_defaults.int16_class;
1336 return mono_defaults.int32_class;
1338 return mono_defaults.uint32_class;
1341 return mono_defaults.int64_class;
1344 return mono_defaults.single_class;
1347 return mono_defaults.double_class;
1348 case CEE_LDELEM_REF:
1349 case CEE_STELEM_REF:
1350 return mono_defaults.object_class;
1352 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable used to carry stack slot SLOT across basic-block
 * boundaries, reusing a previously allocated var (cached in cfg->intvars)
 * when the slot index and stack type match.
 */
1358 * We try to share variables when possible
1361 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1366 /* inlining can result in deeper stacks */
1367 if (slot >= cfg->header->max_stack)
1368 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type (1-based) plus slot * STACK_MAX. */
1370 pos = ins->type - 1 + slot * STACK_MAX;
1372 switch (ins->type) {
/* Sharable types: reuse the cached var if one exists for this (slot, type). */
1379 if ((vnum = cfg->intvars [pos]))
1380 return cfg->varinfo [vnum];
1381 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1382 cfg->intvars [pos] = res->inst_c0;
/* Non-sharable types fall through to a fresh variable every time. */
1385 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   During AOT compilation, record the (image, token) pair under KEY in
 * cfg->token_info_hash so the AOT compiler can later resolve KEY back to a
 * metadata token. No-op outside AOT or when a generic context is in effect.
 */
1391 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1394 * Don't use this if a generic_context is set, since that means AOT can't
1395 * look up the method using just the image+token.
1396 * table == 0 means this is a reference made from a wrapper.
1398 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1399 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1400 jump_info_token->image = image;
1401 jump_info_token->token = token;
1402 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1407 * This function is called to handle items that are left on the evaluation stack
1408 * at basic block boundaries. What happens is that we save the values to local variables
1409 * and we reload them later when first entering the target basic block (with the
1410 * handle_loaded_temps () function).
1411 * A single joint point will use the same variables (stored in the array bb->out_stack or
1412 * bb->in_stack, if the basic block is before or after the joint point).
1414 * This function needs to be called _before_ emitting the last instruction of
1415 * the bb (i.e. before emitting a branch).
1416 * If the stack merge fails at a join point, cfg->unverifiable is set.
1419 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1422 MonoBasicBlock *bb = cfg->cbb;
1423 MonoBasicBlock *outb;
1424 MonoInst *inst, **locals;
1429 if (cfg->verbose_level > 3)
1430 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time we leave this bblock with values on the stack: pick the
 * variables that will carry them (reuse a successor's in_stack if set). */
1431 if (!bb->out_scount) {
1432 bb->out_scount = count;
1433 //printf ("bblock %d has out:", bb->block_num);
1435 for (i = 0; i < bb->out_count; ++i) {
1436 outb = bb->out_bb [i];
1437 /* exception handlers are linked, but they should not be considered for stack args */
1438 if (outb->flags & BB_EXCEPTION_HANDLER)
1440 //printf (" %d", outb->block_num);
1441 if (outb->in_stack) {
/* A successor already has an in_stack: share it as our out_stack. */
1443 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh temps for each slot. */
1449 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1450 for (i = 0; i < count; ++i) {
1452 * try to reuse temps already allocated for this purpouse, if they occupy the same
1453 * stack slot and if they are of the same type.
1454 * This won't cause conflicts since if 'local' is used to
1455 * store one of the values in the in_stack of a bblock, then
1456 * the same variable will be used for the same outgoing stack
1458 * This doesn't work when inlining methods, since the bblocks
1459 * in the inlined methods do not inherit their in_stack from
1460 * the bblock they are inlined to. See bug #58863 for an
1463 if (cfg->inlined_method)
1464 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1466 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to each successor's in_stack; a depth mismatch
 * at a join point makes the method unverifiable. */
1471 for (i = 0; i < bb->out_count; ++i) {
1472 outb = bb->out_bb [i];
1473 /* exception handlers are linked, but they should not be considered for stack args */
1474 if (outb->flags & BB_EXCEPTION_HANDLER)
1476 if (outb->in_scount) {
1477 if (outb->in_scount != bb->out_scount) {
1478 cfg->unverifiable = TRUE;
1481 continue; /* check they are the same locals */
1483 outb->in_scount = count;
1484 outb->in_stack = bb->out_stack;
1487 locals = bb->out_stack;
/* Emit the stores of the live stack values into the chosen temps. */
1489 for (i = 0; i < count; ++i) {
1490 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1491 inst->cil_code = sp [i]->cil_code;
1492 sp [i] = locals [i];
1493 if (cfg->verbose_level > 3)
1494 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1498 * It is possible that the out bblocks already have in_stack assigned, and
1499 * the in_stacks differ. In this case, we will store to all the different
1506 /* Find a bblock which has a different in_stack */
1508 while (bindex < bb->out_count) {
1509 outb = bb->out_bb [bindex];
1510 /* exception handlers are linked, but they should not be considered for stack args */
1511 if (outb->flags & BB_EXCEPTION_HANDLER) {
1515 if (outb->in_stack != locals) {
/* Duplicate the stores into this successor's distinct variable set. */
1516 for (i = 0; i < count; ++i) {
1517 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1518 inst->cil_code = sp [i]->cil_code;
1519 sp [i] = locals [i];
1520 if (cfg->verbose_level > 3)
1521 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1523 locals = outb->in_stack;
/*
 * emit_runtime_constant:
 *
 *   Emit an instruction loading a runtime constant described by
 * (patch_type, data). Under AOT this becomes a GOT/patch-site load; in JIT
 * mode the value is resolved immediately and emitted as a pointer constant.
 */
1533 emit_runtime_constant (MonoCompile *cfg, MonoJumpInfoType patch_type, gpointer data)
1537 if (cfg->compile_aot) {
1538 EMIT_NEW_AOTCONST (cfg, ins, patch_type, data);
/* JIT path: resolve the patch now and embed the resulting pointer. */
1543 ji.type = patch_type;
1544 ji.data.target = data;
1545 target = mono_resolve_patch_target (NULL, cfg->domain, NULL, &ji, FALSE);
1547 EMIT_NEW_PCONST (cfg, ins, target);
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR that loads into INTF_BIT_REG a nonzero value iff the interface
 * bitmap at [BASE_REG + OFFSET] has the bit for KLASS's interface id set.
 * Three strategies: a JIT icall when the bitmap is compressed, a fully
 * register-computed test under AOT (iid not known at compile time), and a
 * constant byte/mask load otherwise.
 */
1553 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1555 int ibitmap_reg = alloc_preg (cfg);
1556 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the lookup to mono_class_interface_match. */
1558 MonoInst *res, *ins;
1559 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1560 MONO_ADD_INS (cfg->cbb, ins);
1562 args [1] = emit_runtime_constant (cfg, MONO_PATCH_INFO_IID, klass);
1563 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1564 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1566 int ibitmap_byte_reg = alloc_preg (cfg);
1568 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1570 if (cfg->compile_aot) {
/* AOT: iid is a patch-time constant, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) in registers at runtime. */
1571 int iid_reg = alloc_preg (cfg);
1572 int shifted_iid_reg = alloc_preg (cfg);
1573 int ibitmap_byte_address_reg = alloc_preg (cfg);
1574 int masked_iid_reg = alloc_preg (cfg);
1575 int iid_one_bit_reg = alloc_preg (cfg);
1576 int iid_bit_reg = alloc_preg (cfg);
1577 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1579 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1580 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1582 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1583 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1584 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is known, fold index and mask into immediates. */
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1593 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1594 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoClass::interface_bitmap. */
1597 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1599 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1603 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1604 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoVTable::interface_bitmap. */
1607 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1609 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1613 * Emit code which checks whenever the interface id of @klass is smaller than
1614 * than the value given by max_iid_reg.
/*
 * If FALSE_TARGET is non-NULL an out-of-range iid branches there; otherwise
 * it raises InvalidCastException. Under AOT the iid comes from a patch
 * constant instead of an immediate.
 */
1617 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1618 MonoBasicBlock *false_target)
1620 if (cfg->compile_aot) {
1621 int iid_reg = alloc_preg (cfg);
1622 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1623 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned compare: branch or throw when max_iid < iid. */
1628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1630 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1633 /* Same as above, but obtains max_iid from a vtable */
1635 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1636 MonoBasicBlock *false_target)
1638 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1640 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1641 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1644 /* Same as above, but obtains max_iid from a klass */
1646 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1647 MonoBasicBlock *false_target)
1649 int max_iid_reg = alloc_preg (cfg);
/* Same as the vtable variant, but reads MonoClass::max_interface_id. */
1651 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1652 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subclass test: check whether the class in KLASS_REG
 * derives from KLASS by probing the supertypes array at KLASS's idepth.
 * Branches to TRUE_TARGET on match; FALSE_TARGET is used for the depth
 * pre-check. KLASS can be given as a runtime MonoInst (klass_ins), an AOT
 * class constant, or a JIT-time immediate.
 */
1656 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1658 int idepth_reg = alloc_preg (cfg);
1659 int stypes_reg = alloc_preg (cfg);
1660 int stype = alloc_preg (cfg);
/* Ensure klass->supertypes/idepth are initialized before we index them. */
1662 mono_class_setup_supertypes (klass);
/* Deeper than the default supertable: must check idepth at runtime first. */
1664 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1665 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1666 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1667 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes[idepth - 1] and compare it against KLASS. */
1669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1672 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1673 } else if (cfg->compile_aot) {
1674 int const_reg = alloc_preg (cfg);
1675 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1676 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper without a runtime klass instruction. */
1684 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1686 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check against a vtable: first verify the
 * interface id is in range, then test the interface bitmap bit. With a
 * TRUE_TARGET a match branches there; otherwise a non-match raises
 * InvalidCastException.
 */
1690 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1692 int intf_reg = alloc_preg (cfg);
1694 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1695 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1698 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1700 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1704 * Variant of the above that takes a register to the class, not the vtable.
/* Same max-iid + bitmap-bit test as mini_emit_iface_cast, but KLASS_REG
 * holds a MonoClass* rather than a MonoVTable*. */
1707 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1709 int intf_bit_reg = alloc_preg (cfg);
1711 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1712 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1713 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1715 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1717 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class equality check: compare KLASS_REG against either the
 * runtime klass value in KLASS_INST or a (possibly AOT-patched) class
 * constant, raising InvalidCastException on mismatch.
 */
1721 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1724 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1726 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
1727 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, ins->dreg);
1729 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with a compile-time klass only. */
1733 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1735 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG with KLASS (class constant under AOT, immediate under
 * JIT) and branch to TARGET using BRANCH_OP on the comparison result.
 */
1739 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1741 if (cfg->compile_aot) {
1742 int const_reg = alloc_preg (cfg);
1743 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1744 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1746 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1748 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: castclass and castclass_inst are mutually recursive
 * for arrays of arrays. */
1752 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS, raising
 * InvalidCastException on failure. Arrays check rank and recurse on the
 * element (cast) class, with special cases around System.Enum and its
 * parent; non-arrays walk the supertypes table like isinst.
 */
1755 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1758 int rank_reg = alloc_preg (cfg);
1759 int eclass_reg = alloc_preg (cfg);
1761 g_assert (!klass_inst);
/* Array path: ranks must match exactly. */
1762 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1764 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1765 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class dispatch; the enum cases mirror the runtime's array
 * covariance rules for enums vs. their underlying types. */
1767 if (klass->cast_class == mono_defaults.object_class) {
1768 int parent_reg = alloc_preg (cfg);
1769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1770 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1771 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1772 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1773 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1774 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1775 } else if (klass->cast_class == mono_defaults.enum_class) {
1776 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1777 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1778 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1780 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1781 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* An SZARRAY target also requires the object to be a vector (no bounds). */
1784 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1785 /* Check that the object is a vector too */
1786 int bounds_reg = alloc_preg (cfg);
1787 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1788 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1789 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes[idepth - 1] must equal KLASS. */
1792 int idepth_reg = alloc_preg (cfg);
1793 int stypes_reg = alloc_preg (cfg);
1794 int stype = alloc_preg (cfg);
1796 mono_class_setup_supertypes (klass);
1798 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1799 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1800 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1801 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1803 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1805 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper without a runtime klass instruction. */
1810 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1812 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR that fills SIZE bytes at [DESTREG + OFFSET] with VAL
 * (currently only 0 is supported, see the assert). Small aligned sizes use
 * single immediate stores; larger regions loop over register-width stores,
 * descending to 4/2/1-byte tails. ALIGN limits the widest store used.
 */
1816 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1820 g_assert (val == 0);
/* Fast path: one immediate store when size fits a register and alignment. */
1825 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1828 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1831 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1834 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1836 #if SIZEOF_REGISTER == 8
1838 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register once, then store repeatedly. */
1844 val_reg = alloc_preg (cfg);
1846 if (SIZEOF_REGISTER == 8)
1847 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1849 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned prefix: byte stores until alignment (or size) is reached. */
1852 /* This could be optimized further if neccesary */
1854 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1861 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1863 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1868 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: 4-, 2-, then 1-byte stores for the remaining bytes. */
1875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1885 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR copying SIZE bytes from [SRCREG + SOFFSET] to
 * [DESTREG + DOFFSET]. Assumes non-overlapping regions (memcpy semantics).
 * ALIGN bounds the widest load/store pair; copies proceed 8/4/2/1 bytes at
 * a time with byte copies for any unaligned prefix and the tail.
 */
1892 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1899 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1900 g_assert (size < 10000);
/* Unaligned prefix: byte-wise copy until alignment is reached. */
1903 /* This could be optimized further if neccesary */
1905 cur_reg = alloc_preg (cfg);
1906 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1907 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte copies when the backend tolerates unaligned access on 64-bit. */
1914 if (!cfg->backend->no_unaligned_access && SIZEOF_REGISTER == 8) {
1916 cur_reg = alloc_preg (cfg);
1917 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1918 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte load/store pairs for the remainder. */
1926 cur_reg = alloc_preg (cfg);
1927 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1928 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1934 cur_reg = alloc_preg (cfg);
1935 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1936 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1942 cur_reg = alloc_preg (cfg);
1943 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1944 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 *   Emit IR that stores register SREG1 into the TLS slot identified by
 * TLS_KEY. Under AOT the slot offset comes from a patch constant feeding
 * OP_TLS_SET_REG; under JIT the resolved offset is baked into OP_TLS_SET.
 */
1952 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1956 if (cfg->compile_aot) {
1957 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1958 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1960 ins->sreg2 = c->dreg;
1961 MONO_ADD_INS (cfg->cbb, ins);
1963 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1965 ins->inst_offset = mini_get_tls_offset (tls_key);
1966 MONO_ADD_INS (cfg->cbb, ins);
1973 * Emit IR to push the current LMF onto the LMF stack.
1976 emit_push_lmf (MonoCompile *cfg)
1979 * Emit IR to push the LMF:
1980 * lmf_addr = <lmf_addr from tls>
1981 * lmf->lmf_addr = lmf_addr
1982 * lmf->prev_lmf = *lmf_addr
1985 int lmf_reg, prev_lmf_reg;
1986 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so link and store directly. */
1991 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1992 /* Load current lmf */
1993 lmf_ins = mono_get_lmf_intrinsic (cfg);
1995 MONO_ADD_INS (cfg->cbb, lmf_ins);
1996 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1997 lmf_reg = ins->dreg;
1998 /* Save previous_lmf */
1999 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Install our frame's LMF as the new TLS LMF. */
2001 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2004 * Store lmf_addr in a variable, so it can be allocated to a global register.
2006 if (!cfg->lmf_addr_var)
2007 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Several alternative ways to obtain lmf_addr follow; which is taken
 * depends on elided preprocessor/feature conditions. */
2010 ins = mono_get_jit_tls_intrinsic (cfg);
2012 int jit_tls_dreg = ins->dreg;
2014 MONO_ADD_INS (cfg->cbb, ins);
2015 lmf_reg = alloc_preg (cfg);
2016 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2018 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2021 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2023 MONO_ADD_INS (cfg->cbb, lmf_ins);
2026 MonoInst *args [16], *jit_tls_ins, *ins;
2028 /* Inline mono_get_lmf_addr () */
2029 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2031 /* Load mono_jit_tls_id */
2032 if (cfg->compile_aot)
2033 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2035 EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
2036 /* call pthread_getspecific () */
2037 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2038 /* lmf_addr = &jit_tls->lmf */
2039 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2042 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2046 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2048 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2049 lmf_reg = ins->dreg;
2051 prev_lmf_reg = alloc_preg (cfg);
2052 /* Save previous_lmf */
2053 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2054 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf: *lmf_addr = &our lmf */
2056 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2063 * Emit IR to pop the current LMF from the LMF stack.
2066 emit_pop_lmf (MonoCompile *cfg)
2068 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2074 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2075 lmf_reg = ins->dreg;
/* TLS-LMF fast path: restore previous_lmf straight into the TLS slot. */
2077 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2078 /* Load previous_lmf */
2079 prev_lmf_reg = alloc_preg (cfg);
2080 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2082 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2085 * Emit IR to pop the LMF:
2086 * *(lmf->lmf_addr) = lmf->prev_lmf
2088 /* This could be called before emit_push_lmf () */
2089 if (!cfg->lmf_addr_var)
2090 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2091 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2093 prev_lmf_reg = alloc_preg (cfg);
2094 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2095 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *
 *   Emit a profiler enter/leave icall to FUNC with the current method as its
 * single argument. Skipped while inlining and when enter/leave profiling is
 * not enabled.
 */
2100 emit_instrumentation_call (MonoCompile *cfg, void *func)
2102 MonoInst *iargs [1];
2105 * Avoid instrumenting inlined methods since it can
2106 * distort profiling results.
2108 if (cfg->method != cfg->current_method)
2111 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2112 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2113 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *
 *   Select the call IR opcode for a call returning TYPE. Each return kind
 * (void, int, long, float, vtype, ...) has three variants chosen by the
 * flags: CALLI (indirect, *_REG), VIRT (*_MEMBASE), or plain direct call.
 * Enums and generic instances are reduced to their underlying type first.
 */
2118 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
2121 type = mini_get_underlying_type (type);
2122 switch (type->type) {
2123 case MONO_TYPE_VOID:
2124 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2131 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2135 case MONO_TYPE_FNPTR:
2136 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2137 case MONO_TYPE_CLASS:
2138 case MONO_TYPE_STRING:
2139 case MONO_TYPE_OBJECT:
2140 case MONO_TYPE_SZARRAY:
2141 case MONO_TYPE_ARRAY:
2142 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2145 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
/* R4 returns use RCALL only when the backend keeps float as R4. */
2148 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2150 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2152 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2153 case MONO_TYPE_VALUETYPE:
/* Enums re-enter the switch with their underlying integral type. */
2154 if (type->data.klass->enumtype) {
2155 type = mono_class_enum_basetype (type->data.klass);
2158 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2159 case MONO_TYPE_TYPEDBYREF:
2160 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2161 case MONO_TYPE_GENERICINST:
2162 type = &type->data.generic_class->container_class->byval_arg;
2165 case MONO_TYPE_MVAR:
/* gsharedvt type variables: treated as vtype calls. */
2167 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2169 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2175 * target_type_is_incompatible:
2176 * @cfg: MonoCompile context
2178 * Check that the item @arg on the evaluation stack can be stored
2179 * in the target type (can be a local, or field, etc).
2180 * The cfg arg can be used to check if we need verification or just
2183 * Returns: non-0 value if arg can't be stored on a target.
2186 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2188 MonoType *simple_type;
/* Byref targets accept managed pointers (with a klass match for non-I)
 * and transient pointers (STACK_PTR). */
2191 if (target->byref) {
2192 /* FIXME: check that the pointed to types match */
2193 if (arg->type == STACK_MP)
2194 return target->type != MONO_TYPE_I && arg->klass != mono_class_from_mono_type (target);
2195 if (arg->type == STACK_PTR)
2200 simple_type = mini_get_underlying_type (target);
2201 switch (simple_type->type) {
2202 case MONO_TYPE_VOID:
2210 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2214 /* STACK_MP is needed when setting pinned locals */
2215 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2220 case MONO_TYPE_FNPTR:
2222 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2223 * in native int. (#688008).
2225 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2228 case MONO_TYPE_CLASS:
2229 case MONO_TYPE_STRING:
2230 case MONO_TYPE_OBJECT:
2231 case MONO_TYPE_SZARRAY:
2232 case MONO_TYPE_ARRAY:
2233 if (arg->type != STACK_OBJ)
2235 /* FIXME: check type compatibility */
2239 if (arg->type != STACK_I8)
2243 if (arg->type != cfg->r4_stack_type)
2247 if (arg->type != STACK_R8)
/* Value types additionally require the exact klass to match. */
2250 case MONO_TYPE_VALUETYPE:
2251 if (arg->type != STACK_VTYPE)
2253 klass = mono_class_from_mono_type (simple_type);
2254 if (klass != arg->klass)
2257 case MONO_TYPE_TYPEDBYREF:
2258 if (arg->type != STACK_VTYPE)
2260 klass = mono_class_from_mono_type (simple_type);
2261 if (klass != arg->klass)
2264 case MONO_TYPE_GENERICINST:
2265 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2266 if (arg->type != STACK_VTYPE)
2268 klass = mono_class_from_mono_type (simple_type);
2269 /* The second cases is needed when doing partial sharing */
2270 if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
2274 if (arg->type != STACK_OBJ)
2276 /* FIXME: check type compatibility */
/* Type variables only appear here under generic sharing. */
2280 case MONO_TYPE_MVAR:
2281 g_assert (cfg->gshared);
2282 if (mini_type_var_is_vt (simple_type)) {
2283 if (arg->type != STACK_VTYPE)
2286 if (arg->type != STACK_OBJ)
2291 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2297 * Prepare arguments for passing to a function call.
2298 * Return a non-zero value if the arguments can't be passed to the given
2300 * The type checks are not yet complete and some conversions may need
2301 * casts on 32 or 64 bit architectures.
2303 * FIXME: implement this using target_type_is_incompatible ()
2306 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2308 MonoType *simple_type;
/* Instance calls: 'this' must be an object, managed or transient pointer. */
2312 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2316 for (i = 0; i < sig->param_count; ++i) {
2317 if (sig->params [i]->byref) {
2318 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2322 simple_type = mini_get_underlying_type (sig->params [i]);
/* handle_enum: label re-entered after stripping enums/generic insts. */
2324 switch (simple_type->type) {
2325 case MONO_TYPE_VOID:
2334 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2340 case MONO_TYPE_FNPTR:
2341 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2344 case MONO_TYPE_CLASS:
2345 case MONO_TYPE_STRING:
2346 case MONO_TYPE_OBJECT:
2347 case MONO_TYPE_SZARRAY:
2348 case MONO_TYPE_ARRAY:
2349 if (args [i]->type != STACK_OBJ)
2354 if (args [i]->type != STACK_I8)
2358 if (args [i]->type != cfg->r4_stack_type)
2362 if (args [i]->type != STACK_R8)
2365 case MONO_TYPE_VALUETYPE:
2366 if (simple_type->data.klass->enumtype) {
2367 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2370 if (args [i]->type != STACK_VTYPE)
2373 case MONO_TYPE_TYPEDBYREF:
2374 if (args [i]->type != STACK_VTYPE)
2377 case MONO_TYPE_GENERICINST:
2378 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2381 case MONO_TYPE_MVAR:
/* gsharedvt type variables are passed as vtypes. */
2383 if (args [i]->type != STACK_VTYPE)
2387 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * NOTE(review): the return statements are elided here; each case maps to
 * the corresponding non-MEMBASE opcode.
 */
2395 callvirt_to_call (int opcode)
2398 case OP_CALL_MEMBASE:
2400 case OP_VOIDCALL_MEMBASE:
2402 case OP_FCALL_MEMBASE:
2404 case OP_RCALL_MEMBASE:
2406 case OP_VCALL_MEMBASE:
2408 case OP_LCALL_MEMBASE:
2411 g_assert_not_reached ();
/*
 * callvirt_to_call_reg:
 *
 *   Map a *_MEMBASE (virtual) call opcode to its indirect (*_REG)
 * counterpart, used when the call target is computed into a register.
 */
2418 callvirt_to_call_reg (int opcode)
2421 case OP_CALL_MEMBASE:
2423 case OP_VOIDCALL_MEMBASE:
2424 return OP_VOIDCALL_REG;
2425 case OP_FCALL_MEMBASE:
2426 return OP_FCALL_REG;
2427 case OP_RCALL_MEMBASE:
2428 return OP_RCALL_REG;
2429 case OP_VCALL_MEMBASE:
2430 return OP_VCALL_REG;
2431 case OP_LCALL_MEMBASE:
2432 return OP_LCALL_REG;
2434 g_assert_not_reached ();
2440 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Materialize the IMT argument (either the explicit IMT_ARG value or a
 * method constant) into a register and attach it to CALL. LLVM stores the
 * register on the call for later lowering; the JIT path binds it to the
 * architecture's dedicated MONO_ARCH_IMT_REG.
 */
2442 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2446 if (COMPILE_LLVM (cfg)) {
2448 method_reg = alloc_preg (cfg);
2449 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2451 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2452 method_reg = ins->dreg;
2456 call->imt_arg_reg = method_reg;
2458 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Non-LLVM path: same selection, bound directly to the IMT register. */
2463 method_reg = alloc_preg (cfg);
2464 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2466 MonoInst *ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHODCONST, method);
2467 method_reg = ins->dreg;
2470 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP with
 * the given TARGET. Caller owns linking it into a patch list.
 */
2473 static MonoJumpInfo *
2474 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2476 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2480 ji->data.target = target;
/* Return how KLASS uses the generic context; the guard for the non-gshared
 * case (returning 0) is elided in this view — TODO confirm. */
2486 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2489 return mono_class_check_context_used (klass);
/* Same as above, for a method rather than a class. */
2495 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2498 return mono_method_check_context_used (method);
2504 * check_method_sharing:
2506 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2509 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2511 gboolean pass_vtable = FALSE;
2512 gboolean pass_mrgctx = FALSE;
/* vtable is needed for static/valuetype methods on generic classes whose
 * code may be shared (and which don't carry a method instantiation). */
2514 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2515 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2516 gboolean sharable = FALSE;
2518 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2522 * Pass vtable iff target method might
2523 * be shared, which means that sharing
2524 * is enabled for its class and its
2525 * context is sharable (and it's not a
2528 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst present) take an mrgctx instead; the two
 * are mutually exclusive (asserted). */
2532 if (mini_method_get_context (cmethod) &&
2533 mini_method_get_context (cmethod)->method_inst) {
2534 g_assert (!pass_vtable);
2536 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2539 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* Out parameters are optional: only written when non-NULL. */
2544 if (out_pass_vtable)
2545 *out_pass_vtable = pass_vtable;
2546 if (out_pass_mrgctx)
2547 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Create a MonoCallInst for SIG/ARGS, pick the call opcode (direct, calli,
 * virtual, tail), arrange the return value (including vtype returns via
 * OP_OUTARG_VTRETADDR) and let the backend lower the argument passing.
 * NOTE(review): fragmentary extraction — declaration lines, braces and some
 * #ifdef/else structure are missing between the numbered lines below.
 */
2550 inline static MonoCallInst *
2551 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2552 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2556 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2564 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2566 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2568 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2571 call->signature = sig;
2572 call->rgctx_reg = rgctx;
2573 sig_ret = mini_get_underlying_type (sig->ret);
2575 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* NOTE(review): 2578 and 2582 test the same predicate as shown; the first
 * branch presumably carried a 'tail &&' conjunct on a missing line — the
 * two arms are the tail-call vs normal-call vtype-return paths. Confirm. */
2578 if (mini_type_is_vtype (sig_ret)) {
2579 call->vret_var = cfg->vret_addr;
2580 //g_assert_not_reached ();
2582 } else if (mini_type_is_vtype (sig_ret)) {
2583 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2586 temp->backend.is_pinvoke = sig->pinvoke;
2589 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2590 * address of return value to increase optimization opportunities.
2591 * Before vtype decomposition, the dreg of the call ins itself represents the
2592 * fact the call modifies the return value. After decomposition, the call will
2593 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2594 * will be transformed into an LDADDR.
2596 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2597 loada->dreg = alloc_preg (cfg);
2598 loada->inst_p0 = temp;
2599 /* We reference the call too since call->dreg could change during optimization */
2600 loada->inst_p1 = call;
2601 MONO_ADD_INS (cfg->cbb, loada);
2603 call->inst.dreg = temp->dreg;
2605 call->vret_var = loada;
2606 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2607 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2609 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2610 if (COMPILE_SOFT_FLOAT (cfg)) {
2612 * If the call has a float argument, we would need to do an r8->r4 conversion using
2613 * an icall, but that cannot be done during the call sequence since it would clobber
2614 * the call registers + the stack. So we do it before emitting the call.
2616 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2618 MonoInst *in = call->args [i];
2620 if (i >= sig->hasthis)
2621 t = sig->params [i - sig->hasthis];
/* the implicit 'this' argument is pointer-sized */
2623 t = &mono_defaults.int_class->byval_arg;
2624 t = mono_type_get_underlying_type (t);
2626 if (!t->byref && t->type == MONO_TYPE_R4) {
2627 MonoInst *iargs [1];
2631 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2633 /* The result will be in an int vreg */
2634 call->args [i] = conv;
2640 call->need_unbox_trampoline = unbox_trampoline;
/* Backend lowering: LLVM builds its own call sequence, otherwise the native
 * backend emits outarg moves/stores */
2643 if (COMPILE_LLVM (cfg))
2644 mono_llvm_emit_call (cfg, call);
2646 mono_arch_emit_call (cfg, call);
2648 mono_arch_emit_call (cfg, call);
2651 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2652 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the rgctx value in RGCTX_REG to CALL: pass it in MONO_ARCH_RGCTX_REG
 * and mark the call/cfg so later passes know the rgctx register is live.
 * NOTE(review): fragment — the #ifdef/else split between the two tail lines
 * is missing from this extraction.
 */
2658 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2660 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2661 cfg->uses_rgctx_reg = TRUE;
2662 call->rgctx_reg = TRUE;
2664 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG. Optionally passes an
 * IMT argument and an rgctx argument. When checking pinvoke calling
 * conventions, brackets the call with OP_GET_SP/OP_SET_SP and raises
 * ExecutionEngineException if the callee unbalanced the stack.
 * NOTE(review): fragmentary extraction — some declarations/braces are missing.
 */
2668 inline static MonoInst*
2669 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2674 gboolean check_sp = FALSE;
2676 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2677 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2679 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Snapshot the rgctx value before argument lowering can clobber it */
2684 rgctx_reg = mono_alloc_preg (cfg);
2685 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2689 if (!cfg->stack_inbalance_var)
2690 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record SP before the call so it can be compared afterwards */
2692 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2693 ins->dreg = cfg->stack_inbalance_var->dreg;
2694 MONO_ADD_INS (cfg->cbb, ins);
2697 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2699 call->inst.sreg1 = addr->dreg;
2702 emit_imt_argument (cfg, call, NULL, imt_arg);
2704 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2709 sp_reg = mono_alloc_preg (cfg);
2711 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2713 MONO_ADD_INS (cfg->cbb, ins);
2715 /* Restore the stack so we don't crash when throwing the exception */
2716 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2717 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2718 MONO_ADD_INS (cfg->cbb, ins);
2720 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2721 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2725 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2727 return (MonoInst*)call;
2731 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2734 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2736 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2739 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2740 MonoInst **args, MonoInst *this_ins, MonoInst *imt_arg, MonoInst *rgctx_arg)
2742 #ifndef DISABLE_REMOTING
2743 gboolean might_be_remote = FALSE;
2745 gboolean virtual = this_ins != NULL;
2746 gboolean enable_for_aot = TRUE;
2749 MonoInst *call_target = NULL;
2751 gboolean need_unbox_trampoline;
2754 sig = mono_method_signature (method);
2756 if (cfg->llvm_only && (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
2757 MonoInst *icall_args [16];
2760 // FIXME: Optimize this
2762 guint32 imt_slot = mono_method_get_imt_slot (method);
2764 icall_args [0] = this_ins;
2765 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
2767 icall_args [2] = imt_arg;
2769 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, method);
2770 icall_args [2] = ins;
2772 EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
2774 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
2778 rgctx_reg = mono_alloc_preg (cfg);
2779 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2782 if (method->string_ctor) {
2783 /* Create the real signature */
2784 /* FIXME: Cache these */
2785 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2786 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2791 context_used = mini_method_check_context_used (cfg, method);
2793 #ifndef DISABLE_REMOTING
2794 might_be_remote = this_ins && sig->hasthis &&
2795 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2796 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this_ins) || context_used);
2798 if (might_be_remote && context_used) {
2801 g_assert (cfg->gshared);
2803 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2805 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2809 if (cfg->llvm_only && !call_target && virtual && (method->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
2810 // FIXME: Vcall optimizations below
2811 MonoInst *icall_args [16];
2814 if (sig->generic_param_count) {
2816 * Generic virtual call, pass the concrete method as the imt argument.
2818 imt_arg = emit_get_rgctx_method (cfg, context_used,
2819 method, MONO_RGCTX_INFO_METHOD);
2822 // FIXME: Optimize this
2824 int slot = mono_method_get_vtable_index (method);
2826 icall_args [0] = this_ins;
2827 EMIT_NEW_ICONST (cfg, icall_args [1], slot);
2829 icall_args [2] = imt_arg;
2831 EMIT_NEW_PCONST (cfg, ins, NULL);
2832 icall_args [2] = ins;
2834 call_target = mono_emit_jit_icall (cfg, mono_resolve_vcall, icall_args);
2837 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2839 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2841 #ifndef DISABLE_REMOTING
2842 if (might_be_remote)
2843 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2846 call->method = method;
2847 call->inst.flags |= MONO_INST_HAS_METHOD;
2848 call->inst.inst_left = this_ins;
2849 call->tail_call = tail;
2852 int vtable_reg, slot_reg, this_reg;
2855 this_reg = this_ins->dreg;
2857 if (!cfg->llvm_only && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2858 MonoInst *dummy_use;
2860 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2862 /* Make a call to delegate->invoke_impl */
2863 call->inst.inst_basereg = this_reg;
2864 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2865 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2867 /* We must emit a dummy use here because the delegate trampoline will
2868 replace the 'this' argument with the delegate target making this activation
2869 no longer a root for the delegate.
2870 This is an issue for delegates that target collectible code such as dynamic
2871 methods of GC'able assemblies.
2873 For a test case look into #667921.
2875 FIXME: a dummy use is not the best way to do it as the local register allocator
2876 will put it on a caller save register and spil it around the call.
2877 Ideally, we would either put it on a callee save register or only do the store part.
2879 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2881 return (MonoInst*)call;
2884 if ((!cfg->compile_aot || enable_for_aot) &&
2885 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2886 (MONO_METHOD_IS_FINAL (method) &&
2887 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2888 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2890 * the method is not virtual, we just need to ensure this is not null
2891 * and then we can call the method directly.
2893 #ifndef DISABLE_REMOTING
2894 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2896 * The check above ensures method is not gshared, this is needed since
2897 * gshared methods can't have wrappers.
2899 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2903 if (!method->string_ctor)
2904 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2906 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2907 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2909 * the method is virtual, but we can statically dispatch since either
2910 * it's class or the method itself are sealed.
2911 * But first we need to ensure it's not a null reference.
2913 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2915 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2916 } else if (call_target) {
2917 vtable_reg = alloc_preg (cfg);
2918 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2920 call->inst.opcode = callvirt_to_call_reg (call->inst.opcode);
2921 call->inst.sreg1 = call_target->dreg;
2922 call->inst.flags &= !MONO_INST_HAS_METHOD;
2924 vtable_reg = alloc_preg (cfg);
2925 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2926 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2927 guint32 imt_slot = mono_method_get_imt_slot (method);
2928 emit_imt_argument (cfg, call, call->method, imt_arg);
2929 slot_reg = vtable_reg;
2930 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2932 slot_reg = vtable_reg;
2933 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2934 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2936 g_assert (mono_method_signature (method)->generic_param_count);
2937 emit_imt_argument (cfg, call, call->method, imt_arg);
2941 call->inst.sreg1 = slot_reg;
2942 call->inst.inst_offset = offset;
2943 call->is_virtual = TRUE;
2947 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2950 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2952 return (MonoInst*)call;
/* Convenience wrapper: non-tail call to METHOD with its own signature and no
 * imt/rgctx arguments. */
2956 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this_ins)
2958 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this_ins, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to native address FUNC with signature SIG.
 * NOTE(review): fragment — the line assigning FUNC into the call (fptr) is
 * missing from this extraction.
 */
2962 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2969 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2972 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2974 return (MonoInst*)call;
/* Emit a call to the JIT icall registered for FUNC, going through its
 * exception-handling wrapper. */
2978 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2980 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2984 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2988 * mono_emit_abs_call:
2990 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2992 inline static MonoInst*
2993 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2994 MonoMethodSignature *sig, MonoInst **args)
2996 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
3000 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table mapping the fake ji "address" back to the patch */
3003 if (cfg->abs_patches == NULL)
3004 cfg->abs_patches = g_hash_table_new (NULL, NULL);
3005 g_hash_table_insert (cfg->abs_patches, ji, ji);
3006 ins = mono_emit_native_call (cfg, ji, sig, args);
3007 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/* Whether icalls may be called directly (no wrapper). Disabled under LLVM
 * and when sdb sequence points need wrapper frames for stack walks. */
3012 direct_icalls_enabled (MonoCompile *cfg)
3014 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3016 if (cfg->compile_llvm)
3019 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 * Emit a call to the icall INFO. When the icall cannot raise and direct
 * icalls are enabled, inline its wrapper (call + exception check) instead of
 * calling through the wrapper trampoline.
 */
3025 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
3028 * Call the jit icall without a wrapper if possible.
3029 * The wrapper is needed for the following reasons:
3030 * - to handle exceptions thrown using mono_raise_exceptions () from the
3031 * icall function. The EH code needs the lmf frame pushed by the
3032 * wrapper to be able to unwind back to managed code.
3033 * - to be able to do stack walks for asynchronously suspended
3034 * threads when debugging.
3036 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Lazily create and publish the wrapper; the barrier orders the store */
3040 if (!info->wrapper_method) {
3041 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3042 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
3044 mono_memory_barrier ();
3048 * Inline the wrapper method, which is basically a call to the C icall, and
3049 * an exception check.
3051 costs = inline_method (cfg, info->wrapper_method, NULL,
3052 args, NULL, cfg->real_offset, TRUE);
3053 g_assert (costs > 0);
3054 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3058 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 * Sign/zero-extend a small-integer call result INS to register width, since
 * native (pinvoke) or LLVM code may leave the upper bits uninitialized.
 * NOTE(review): fragment — declarations, break statements and the return are
 * on lines missing from this extraction.
 */
3063 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3065 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3066 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3070 * Native code might return non register sized integers
3071 * without initializing the upper bits.
3073 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3074 case OP_LOADI1_MEMBASE:
3075 widen_op = OP_ICONV_TO_I1;
3077 case OP_LOADU1_MEMBASE:
3078 widen_op = OP_ICONV_TO_U1;
3080 case OP_LOADI2_MEMBASE:
3081 widen_op = OP_ICONV_TO_I2;
3083 case OP_LOADU2_MEMBASE:
3084 widen_op = OP_ICONV_TO_U2;
3090 if (widen_op != -1) {
3091 int dreg = alloc_preg (cfg);
3094 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3095 widen->type = ins->type;
/* Lazily resolve corlib's String.memcpy(dst, src, size) helper.
 * NOTE(review): the null-check line between 3109 and 3111 is missing from
 * this extraction; the g_error fires when corlib lacks the method. */
3105 get_memcpy_method (void)
3107 static MonoMethod *memcpy_method = NULL;
3108 if (!memcpy_method) {
3109 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3111 g_error ("Old corlib found. Install a new one");
3113 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Recursively build a bitmask in *WB_BITMAP with one bit per pointer-sized
 * slot (relative to OFFSET) that holds a GC reference inside KLASS, so a
 * value-copy can apply write barriers only where needed.
 */
3117 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3119 MonoClassField *field;
3120 gpointer iter = NULL;
3122 while ((field = mono_class_get_fields (klass, &iter))) {
3125 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the object header; strip it */
3127 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3128 if (mini_type_is_reference (mono_field_get_type (field))) {
3129 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3130 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
/* nested struct: recurse only when it can contain references */
3132 MonoClass *field_class = mono_class_from_mono_type (field->type);
3133 if (field_class->has_references)
3134 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit the GC write barrier for storing VALUE through PTR. Prefers, in
 * order: a backend card-table opcode, inline card-table marking, and finally
 * a call to the generic write-barrier method.
 */
3140 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3142 int card_table_shift_bits;
3143 gpointer card_table_mask;
3145 MonoInst *dummy_use;
3146 int nursery_shift_bits;
3147 size_t nursery_size;
3149 if (!cfg->gen_write_barriers)
3152 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3154 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3156 if (cfg->backend->have_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3159 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3160 wbarrier->sreg1 = ptr->dreg;
3161 wbarrier->sreg2 = value->dreg;
3162 MONO_ADD_INS (cfg->cbb, wbarrier);
3163 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3164 int offset_reg = alloc_preg (cfg);
/* card index = address >> shift, optionally masked */
3168 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3169 if (card_table_mask)
3170 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3172 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3173 * IMM's larger than 32bits.
3175 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
3176 card_reg = ins->dreg;
3178 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3179 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* fallback: call the managed write-barrier helper */
3181 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3182 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier so the GC sees it as a root */
3185 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Try to emit an inline, write-barrier-aware copy of a KLASS value of SIZE
 * bytes from iargs[1] to iargs[0]. Falls back to the bitmap-based icall for
 * large values. Returns whether inlining succeeded (return lines are missing
 * from this extraction — TODO confirm).
 */
3189 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3191 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3192 unsigned need_wb = 0;
3197 /*types with references can't have alignment smaller than sizeof(void*) */
3198 if (align < SIZEOF_VOID_P)
3201 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3202 if (size > 32 * SIZEOF_VOID_P)
3205 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3207 /* We don't unroll more than 5 stores to avoid code bloat. */
3208 if (size > 5 * SIZEOF_VOID_P) {
3209 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3210 size += (SIZEOF_VOID_P - 1);
3211 size &= ~(SIZEOF_VOID_P - 1);
3213 EMIT_NEW_ICONST (cfg, iargs [2], size);
3214 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3215 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3219 destreg = iargs [0]->dreg;
3220 srcreg = iargs [1]->dreg;
3223 dest_ptr_reg = alloc_preg (cfg);
3224 tmp_reg = alloc_preg (cfg);
3227 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* unrolled pointer-sized copy; barrier each slot flagged in the bitmap */
3229 while (size >= SIZEOF_VOID_P) {
3230 MonoInst *load_inst;
3231 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3232 load_inst->dreg = tmp_reg;
3233 load_inst->inst_basereg = srcreg;
3234 load_inst->inst_offset = offset;
3235 MONO_ADD_INS (cfg->cbb, load_inst);
3237 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3240 emit_write_barrier (cfg, iargs [0], load_inst);
3242 offset += SIZEOF_VOID_P;
3243 size -= SIZEOF_VOID_P;
3246 /*tmp += sizeof (void*)*/
3247 if (size >= SIZEOF_VOID_P) {
3248 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3249 MONO_ADD_INS (cfg->cbb, iargs [0]);
3253 /* Those cannot be references since size < sizeof (void*) */
3255 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3256 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3262 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3263 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3269 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3270 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3279 * Emit code to copy a valuetype of type @klass whose address is stored in
3280 * @src->dreg to memory whose address is stored at @dest->dreg.
/* Handles gsharedvt (runtime-sized) copies, write-barriered copies via
 * mono_value_copy, inline memcpy for small types, and the corlib memcpy
 * helper as the general fallback. */
3283 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3285 MonoInst *iargs [4];
3288 MonoMethod *memcpy_method;
3289 MonoInst *size_ins = NULL;
3290 MonoInst *memcpy_ins = NULL;
3294 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3297 * This check breaks with spilled vars... need to handle it during verification anyway.
3298 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime */
3301 if (mini_is_gsharedvt_klass (klass)) {
3303 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3304 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3308 n = mono_class_native_size (klass, &align);
3310 n = mono_class_value_size (klass, &align);
3312 /* if native is true there should be no references in the struct */
3313 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3314 /* Avoid barriers when storing to the stack */
3315 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3316 (dest->opcode == OP_LDADDR))) {
3322 context_used = mini_class_check_context_used (cfg, klass);
3324 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3325 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3327 } else if (context_used) {
3328 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3330 iargs [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, klass);
3331 if (!cfg->compile_aot)
3332 mono_class_compute_gc_descriptor (klass);
3336 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3338 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no-barrier path: inline memcpy for small sizes, helper call otherwise */
3343 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3344 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3345 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3350 iargs [2] = size_ins;
3352 EMIT_NEW_ICONST (cfg, iargs [2], n);
3354 memcpy_method = get_memcpy_method ();
3356 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3358 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Lazily resolve corlib's String.memset(ptr, val, size) helper.
 * NOTE(review): the null-check line between 3367 and 3369 is missing from
 * this extraction; the g_error fires when corlib lacks the method. */
3363 get_memset_method (void)
3365 static MonoMethod *memset_method = NULL;
3366 if (!memset_method) {
3367 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3369 g_error ("Old corlib found. Install a new one");
3371 return memset_method;
/*
 * mini_emit_initobj:
 * Emit IR to zero-initialize a KLASS value at DEST->dreg: gsharedvt bzero
 * call when the size is runtime-only, inline memset for small sizes, and the
 * corlib memset helper otherwise.
 */
3375 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3377 MonoInst *iargs [3];
3380 MonoMethod *memset_method;
3381 MonoInst *size_ins = NULL;
3382 MonoInst *bzero_ins = NULL;
3383 static MonoMethod *bzero_method;
3385 /* FIXME: Optimize this for the case when dest is an LDADDR */
3386 mono_class_init (klass);
3387 if (mini_is_gsharedvt_klass (klass)) {
3388 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3389 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3391 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3392 g_assert (bzero_method);
3394 iargs [1] = size_ins;
3395 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3399 n = mono_class_value_size (klass, &align);
/* small enough to unroll inline */
3401 if (n <= sizeof (gpointer) * 8) {
3402 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3405 memset_method = get_memset_method ();
3407 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3408 EMIT_NEW_ICONST (cfg, iargs [2], n);
3409 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3416 * Emit IR to return either the this pointer for instance method,
3417 * or the mrgctx for static methods.
/* The value returned is the "rgctx origin": this, the mrgctx var, the vtable
 * var, or the vtable loaded from this — depending on the sharing mode. */
3420 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3422 MonoInst *this_ins = NULL;
3424 g_assert (cfg->gshared);
3426 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3427 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3428 !method->klass->valuetype)
3429 EMIT_NEW_ARGLOAD (cfg, this_ins, 0);
/* method-level context: rgctx comes from the MRGCTX variable */
3431 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3432 MonoInst *mrgctx_loc, *mrgctx_var;
3434 g_assert (!this_ins);
3435 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3437 mrgctx_loc = mono_get_vtable_var (cfg);
3438 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3441 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3442 MonoInst *vtable_loc, *vtable_var;
3444 g_assert (!this_ins);
3446 vtable_loc = mono_get_vtable_var (cfg);
3447 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* the variable actually holds an mrgctx; load the vtable out of it */
3449 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3450 MonoInst *mrgctx_var = vtable_var;
3453 vtable_reg = alloc_preg (cfg);
3454 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3455 vtable_var->type = STACK_PTR;
/* instance method: load the vtable from 'this' */
3463 vtable_reg = alloc_preg (cfg);
3464 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this_ins->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Allocate an rgctx-entry patch descriptor (METHOD + patch data + slot info
 * type) from mempool MP. The return statement is on a missing line. */
3469 static MonoJumpInfoRgctxEntry *
3470 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3472 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3473 res->method = method;
3474 res->in_mrgctx = in_mrgctx;
3475 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3476 res->data->type = patch_type;
3477 res->data->data.target = patch_data;
3478 res->info_type = info_type;
/*
 * emit_rgctx_fetch_inline:
 * Emit an inline fastpath that walks the rgctx array chain for ENTRY's slot
 * and falls back to the mono_fill_{method,class}_rgctx icalls when a level
 * or the slot itself is still NULL. The llvm-only variant calls the icall
 * unconditionally.
 * NOTE(review): fragmentary extraction — #ifdef/else structure, some
 * declarations and break/return lines are missing between numbered lines.
 */
3483 static inline MonoInst*
3484 emit_rgctx_fetch_inline (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3486 MonoInst *args [16];
3489 // FIXME: No fastpath since the slot is not a compile time constant
3491 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_RGCTX_SLOT_INDEX, entry);
3492 if (entry->in_mrgctx)
3493 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3495 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3499 * FIXME: This can be called during decompose, which is a problem since it creates
3501 * Also, the fastpath doesn't work since the slot number is dynamically allocated.
3503 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3505 MonoBasicBlock *is_null_bb, *end_bb;
3506 MonoInst *res, *ins, *call;
3509 slot = mini_get_rgctx_entry_slot (entry);
/* decode slot into (mrgctx?, index) and find the array depth holding it */
3511 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3512 index = MONO_RGCTX_SLOT_INDEX (slot);
3514 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
3515 for (depth = 0; ; ++depth) {
3516 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3518 if (index < size - 1)
3523 NEW_BBLOCK (cfg, end_bb);
3524 NEW_BBLOCK (cfg, is_null_bb);
3527 rgctx_reg = rgctx->dreg;
3529 rgctx_reg = alloc_preg (cfg);
3531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3532 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3533 NEW_BBLOCK (cfg, is_null_bb);
3535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3536 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* walk the chain of rgctx arrays down to the slot's depth */
3539 for (i = 0; i < depth; ++i) {
3540 int array_reg = alloc_preg (cfg);
3542 /* load ptr to next array */
3543 if (mrgctx && i == 0)
3544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3547 rgctx_reg = array_reg;
3548 /* is the ptr null? */
3549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3550 /* if yes, jump to actual trampoline */
3551 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3555 val_reg = alloc_preg (cfg);
3556 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3557 /* is the slot null? */
3558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3559 /* if yes, jump to actual trampoline */
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fastpath hit: move the cached value into the result reg */
3563 res_reg = alloc_preg (cfg);
3564 MONO_INST_NEW (cfg, ins, OP_MOVE);
3565 ins->dreg = res_reg;
3566 ins->sreg1 = val_reg;
3567 MONO_ADD_INS (cfg->cbb, ins);
3569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slowpath: fill the slot via the runtime icall */
3572 MONO_START_BB (cfg, is_null_bb);
3574 EMIT_NEW_ICONST (cfg, args [1], index);
3576 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3578 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3579 MONO_INST_NEW (cfg, ins, OP_MOVE);
3580 ins->dreg = res_reg;
3581 ins->sreg1 = call->dreg;
3582 MONO_ADD_INS (cfg->cbb, ins);
3583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3585 MONO_START_BB (cfg, end_bb);
3594 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/* Chooses between the inline fastpath and the lazy-fetch trampoline; the
 * selecting condition is on a missing line — presumably cfg->llvm_only. */
3597 static inline MonoInst*
3598 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3601 return emit_rgctx_fetch_inline (cfg, rgctx, entry);
3603 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR to fetch the RGCTX_TYPE data for KLASS from the rgctx. */
3607 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3608 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3610 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3611 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3613 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch the RGCTX_TYPE data for signature SIG from the rgctx. */
3617 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3618 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3620 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3621 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3623 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch gsharedvt call info (SIG + CMETHOD) from the rgctx. */
3627 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3628 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3630 MonoJumpInfoGSharedVtCall *call_info;
3631 MonoJumpInfoRgctxEntry *entry;
3634 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3635 call_info->sig = sig;
3636 call_info->method = cmethod;
3638 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3639 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3641 return emit_rgctx_fetch (cfg, rgctx, entry);
3645 * emit_get_rgctx_virt_method:
3647 * Return data for method VIRT_METHOD for a receiver of type KLASS.
3650 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3651 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3653 	MonoJumpInfoVirtMethod *info;
3654 	MonoJumpInfoRgctxEntry *entry;
/* Key the rgctx slot by the (receiver class, virtual method) pair. */
3657 	info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3658 	info->klass = klass;
3659 	info->method = virt_method;
3661 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3662 	rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3664 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR to load the per-method gsharedvt info (INFO) of CMETHOD from the
 *   runtime generic context (slot type METHOD_GSHAREDVT_INFO).
 */
3668 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3669 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3671 	MonoJumpInfoRgctxEntry *entry;
3674 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3675 	rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3677 	return emit_rgctx_fetch (cfg, rgctx, entry);
3681 * emit_get_rgctx_method:
3683 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3684 * normal constants, else emit a load from the rgctx.
3687 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3688 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method identity is known at JIT time, so emit a constant directly instead of an rgctx load. */
3690 	if (!context_used) {
3693 		switch (rgctx_type) {
3694 		case MONO_RGCTX_INFO_METHOD:
3695 			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3697 		case MONO_RGCTX_INFO_METHOD_RGCTX:
3698 			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Any other rgctx_type is unsupported on the constant path. */
3701 			g_assert_not_reached ();
/* Shared case: go through the rgctx with a METHODCONST patch key. */
3704 		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3705 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3707 		return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR to load the RGCTX_TYPE property of FIELD through the runtime
 *   generic context (FIELD patch kind).
 */
3712 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3713 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3715 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3716 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3718 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *   Return the index of the slot in the current method's gsharedvt info
 *   template that holds (DATA, RGCTX_TYPE), registering a new slot if none
 *   exists. The entries array grows by doubling (starting at 16).
 */
3722 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3724 	MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3725 	MonoRuntimeGenericContextInfoTemplate *template;
/* Reuse an existing slot if the same (type, data) pair was already registered. LOCAL_OFFSET entries are deliberately never shared (each gets its own slot). */
3730 	for (i = 0; i < info->num_entries; ++i) {
3731 		MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3733 		if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array when full; old storage is mempool-owned, so it is simply abandoned after the copy. */
3737 	if (info->num_entries == info->count_entries) {
3738 		MonoRuntimeGenericContextInfoTemplate *new_entries;
3739 		int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3741 		new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3743 		memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3744 		info->entries = new_entries;
3745 		info->count_entries = new_count_entries;
/* Register the new template at the next free index. */
3748 	idx = info->num_entries;
3749 	template = &info->entries [idx];
3750 	template->info_type = rgctx_type;
3751 	template->data = data;
3753 	info->num_entries ++;
3759 * emit_get_gsharedvt_info:
3761 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3764 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or find) the slot for (data, rgctx_type) in the method's gsharedvt info template. */
3769 	idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3770 	/* Load info->entries [idx] */
3771 	dreg = alloc_preg (cfg);
3772 	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type. */
3778 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3780 	return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3784 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *   Emit IR that runs the static constructor / class initialization for KLASS
 *   if it has not run yet. The vtable argument is obtained from the rgctx in
 *   shared code, or as a constant otherwise (branching condition elided in
 *   this extract — presumably on context_used; TODO confirm).
 */
3787 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3789 	MonoInst *vtable_arg;
3792 	context_used = mini_class_check_context_used (cfg, klass);
3795 		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3796 										   klass, MONO_RGCTX_INFO_VTABLE);
3798 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3802 		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* Fast path: a single backend opcode hides the init call (see comment below), keeping register state and basic-block structure intact. */
3805 	if (!COMPILE_LLVM (cfg) && cfg->backend->have_op_generic_class_init) {
3809 		 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3810 		 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3812 		MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3813 		ins->sreg1 = vtable_arg->dreg;
3814 		MONO_ADD_INS (cfg->cbb, ins);
/* Slow path: inline test of the MonoVTable "initialized" bitfield, skipping the icall when the bit is already set. */
3816 		static int byte_offset = -1;
3817 		static guint8 bitmask;
3818 		int bits_reg, inited_reg;
3819 		MonoBasicBlock *inited_bb;
3820 		MonoInst *args [16];
/* Locate the bitfield once; cached in function-local statics. */
3822 		if (byte_offset < 0)
3823 			mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3825 		bits_reg = alloc_ireg (cfg);
3826 		inited_reg = alloc_ireg (cfg);
3828 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3829 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3831 		NEW_BBLOCK (cfg, inited_bb);
/* If the initialized bit is set, branch past the icall. */
3833 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3834 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3836 		args [0] = vtable_arg;
3837 		mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3839 		MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *   Emit a sequence point (debugger stop location) at IL offset IP, but only
 *   when sequence points are enabled and we are compiling METHOD itself
 *   (not an inlined callee). INTR_LOC marks an interruptable location.
 */
3844 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3848 	if (cfg->gen_seq_points && cfg->method == method) {
3849 		NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* Record that the IL evaluation stack is non-empty at this point. */
3851 			ins->flags |= MONO_INST_NONEMPTY_STACK;
3852 		MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *   When running with --debug=casts, record the source class (from the
 *   object's vtable) and the target KLASS in the thread's MonoJitTlsData
 *   (class_cast_from / class_cast_to) so a failing cast can produce a
 *   detailed InvalidCastException message. No-op otherwise.
 */
3857 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3859 	if (mini_get_debug_options ()->better_cast_details) {
3860 		int vtable_reg = alloc_preg (cfg);
3861 		int klass_reg = alloc_preg (cfg);
3862 		MonoBasicBlock *is_null_bb = NULL;
3864 		int to_klass_reg, context_used;
/* Optionally skip the bookkeeping entirely for null objects. */
3867 			NEW_BBLOCK (cfg, is_null_bb);
3869 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3870 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3873 		tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Some platforms lack the TLS intrinsic; warn instead of crashing. */
3875 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3879 		MONO_ADD_INS (cfg->cbb, tls_get);
/* from-class = obj->vtable->klass */
3880 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3881 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3883 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3885 		context_used = mini_class_check_context_used (cfg, klass);
3887 			MonoInst *class_ins;
/* to-class: load from the rgctx in shared code, otherwise a class constant. */
3889 			class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3890 			to_klass_reg = class_ins->dreg;
3892 			to_klass_reg = alloc_preg (cfg);
3893 			MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3895 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3898 			MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *   Counterpart of save_cast_details: clear the recorded cast details after
 *   a cast succeeds, so stale data cannot leak into a later failure message.
 *   Only active with --debug=casts.
 */
3903 reset_cast_details (MonoCompile *cfg)
3905 	/* Reset the variables holding the cast details */
3906 	if (mini_get_debug_options ()->better_cast_details) {
3907 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3909 		MONO_ADD_INS (cfg->cbb, tls_get);
3910 		/* It is enough to reset the from field */
3911 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3916 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *   Emit a runtime check that OBJ's exact type is ARRAY_CLASS, throwing
 *   ArrayTypeMismatchException otherwise (used for covariant array stores).
 *   Three comparison strategies depending on compilation mode.
 */
3919 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3921 	int vtable_reg = alloc_preg (cfg);
3924 	context_used = mini_class_check_context_used (cfg, array_class);
3926 	save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Load obj->vtable; also serves as the null check (faulting load). */
3928 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared (domain-neutral) code: compare MonoClass pointers via a runtime constant. */
3930 	if (cfg->opt & MONO_OPT_SHARED) {
3931 		int class_reg = alloc_preg (cfg);
3934 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3935 		ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_CLASS, array_class);
3936 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, ins->dreg);
/* Generic-shared code: fetch the expected vtable from the rgctx. */
3937 	} else if (context_used) {
3938 		MonoInst *vtable_ins;
3940 		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3941 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Plain JIT/AOT: compare against the concrete vtable (constant or immediate). */
3943 		if (cfg->compile_aot) {
3947 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3949 			vt_reg = alloc_preg (cfg);
3950 			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3951 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3954 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3956 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3960 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3962 	reset_cast_details (cfg);
3966 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3967 * generic code is generated.
3970 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Unboxing Nullable<T> is delegated to the managed Nullable<T>.Unbox helper. */
3972 	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3975 		MonoInst *rgctx, *addr;
3977 		/* FIXME: What if the class is shared? We might not
3978 		   have to get the address of the method from the
/* Shared path: fetch the method's code address from the rgctx and call indirectly. */
3980 		addr = emit_get_rgctx_method (cfg, context_used, method,
3981 									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3983 		rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3985 		return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, passing a vtable arg if method sharing requires it. */
3987 		gboolean pass_vtable, pass_mrgctx;
3988 		MonoInst *rgctx_arg = NULL;
3990 		check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3991 		g_assert (!pass_mrgctx);
3994 			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3997 			EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4000 		return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *   Emit IR to unbox the object in SP[0] to value type KLASS: verify the
 *   object's dynamic type, then return the address just past the MonoObject
 *   header where the value data lives.
 */
4005 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
4009 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
4010 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
4011 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
4012 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
4014 	obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
4015 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4016 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4018 	/* FIXME: generics */
4019 	g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a value type. */
4022 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
4023 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4025 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4026 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare element classes using a class loaded from the rgctx. */
4029 		MonoInst *element_class;
4031 		/* This assertion is from the unboxcast insn */
4032 		g_assert (klass->rank == 0);
4034 		element_class = emit_get_rgctx_klass (cfg, context_used,
4035 											  klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
4037 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
4038 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: direct class check with cast-detail bookkeeping. */
4040 		save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
4041 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
4042 		reset_cast_details (cfg);
/* Result: pointer to the value data, i.e. obj + sizeof (MonoObject). */
4045 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
4046 	MONO_ADD_INS (cfg->cbb, add);
4047 	add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *   Unbox OBJ when KLASS is only known at runtime (gsharedvt). Branches on
 *   the runtime box type of KLASS: plain vtype (in-place address), reference
 *   type (spill the ref to a temporary and return its address), or
 *   Nullable<T> (call the nullable unbox helper indirectly). All paths store
 *   an address into addr_reg, from which the final value load is emitted.
 */
4054 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
4056 	MonoInst *addr, *klass_inst, *is_ref, *args[16];
4057 	MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4061 	klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Dynamic cast check via icall; returns the (checked) object. */
4067 		args [1] = klass_inst;
4070 		obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4072 	NEW_BBLOCK (cfg, is_ref_bb);
4073 	NEW_BBLOCK (cfg, is_nullable_bb);
4074 	NEW_BBLOCK (cfg, end_bb);
/* Three-way dispatch on the runtime class box type. */
4075 	is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4076 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4077 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4079 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4080 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4082 	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4083 	addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: the data sits right after the object header. */
4087 	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4088 	MONO_ADD_INS (cfg->cbb, addr);
4090 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4093 	MONO_START_BB (cfg, is_ref_bb);
4095 	/* Save the ref to a temporary */
4096 	dreg = alloc_ireg (cfg);
4097 	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4098 	addr->dreg = addr_reg;
4099 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4100 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4103 	MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable unbox through an address from the gsharedvt info; the signature is built by hand since the method cannot be constructed at JIT time. */
4106 		MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4107 		MonoInst *unbox_call;
4108 		MonoMethodSignature *unbox_sig;
4110 		unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4111 		unbox_sig->ret = &klass->byval_arg;
4112 		unbox_sig->param_count = 1;
4113 		unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4114 		unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4116 		EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4117 		addr->dreg = addr_reg;
4120 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4123 	MONO_START_BB (cfg, end_bb);
/* Load the unboxed value from whichever address the taken branch produced. */
4126 	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4132 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *   Emit IR that allocates an instance of KLASS and returns the new object.
 *   FOR_BOX selects the boxing-specialized allocator. Picks between several
 *   strategies: rgctx-based allocation for shared code, GC managed
 *   allocators, an mscorlib-specialized AOT helper, and plain icalls.
 */
4135 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4137 	MonoInst *iargs [2];
/* Shared-code branch (context_used path; enclosing condition elided in this extract — TODO confirm). */
4143 		MonoInst *iargs [2];
/* gsharedvt classes have an unknown instance size at JIT time. */
4144 		gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4146 		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* Domain-neutral code keys the rgctx slot on the class, otherwise on the vtable. */
4148 		if (cfg->opt & MONO_OPT_SHARED)
4149 			rgctx_info = MONO_RGCTX_INFO_KLASS;
4151 			rgctx_info = MONO_RGCTX_INFO_VTABLE;
4152 		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4154 		if (cfg->opt & MONO_OPT_SHARED) {
4155 			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4157 			alloc_ftn = mono_object_new;
4160 			alloc_ftn = mono_object_new_specific;
/* Prefer the inlined GC managed allocator when available. */
4163 		if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4164 			if (known_instance_size) {
4165 				int size = mono_class_instance_size (klass);
/* Sanity: every object is at least a MonoObject header. */
4166 				if (size < sizeof (MonoObject))
4167 					g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4169 				EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4171 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4174 		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared branches follow. */
4177 	if (cfg->opt & MONO_OPT_SHARED) {
4178 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4179 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4181 		alloc_ftn = mono_object_new;
4182 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4183 		/* This happens often in argument checking code, eg. throw new FooException... */
4184 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4185 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4186 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4188 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4189 		MonoMethod *managed_alloc = NULL;
/* Class failed to load: record a TypeLoadException on the cfg and bail. */
4193 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4194 			cfg->exception_ptr = klass;
4198 			managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4200 		if (managed_alloc) {
4201 			int size = mono_class_instance_size (klass);
4202 			if (size < sizeof (MonoObject))
4203 				g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4205 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4206 			EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4207 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fallback: class-specific allocation function; pass_lw indicates it wants the size in machine words as the first argument. */
4209 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4211 			guint32 lw = vtable->klass->instance_size;
4212 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4213 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
4214 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4217 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4221 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4225 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *   Emit IR to box VAL of type KLASS into a heap object. Special-cases
 *   Nullable<T> (managed Nullable<T>.Box helper) and gsharedvt classes
 *   (runtime three-way dispatch on the box type); otherwise allocates and
 *   stores the value after the object header.
 */
4228 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4230 	MonoInst *alloc, *ins;
4232 	if (mono_class_is_nullable (klass)) {
4233 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4236 			/* FIXME: What if the class is shared? We might not
4237 			   have to get the method address from the RGCTX. */
/* Shared path: indirect call through the method code address from the rgctx. */
4238 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4239 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4240 			MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4242 			return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, optionally with a vtable argument. */
4244 			gboolean pass_vtable, pass_mrgctx;
4245 			MonoInst *rgctx_arg = NULL;
4247 			check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4248 			g_assert (!pass_mrgctx);
4251 				MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4254 				EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4257 			return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4261 	if (mini_is_gsharedvt_klass (klass)) {
4262 		MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4263 		MonoInst *res, *is_ref, *src_var, *addr;
4266 		dreg = alloc_ireg (cfg);
4268 		NEW_BBLOCK (cfg, is_ref_bb);
4269 		NEW_BBLOCK (cfg, is_nullable_bb);
4270 		NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type, mirroring handle_unbox_gsharedvt. */
4271 		is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4272 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
4273 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4275 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, MONO_GSHAREDVT_BOX_TYPE_NULLABLE);
4276 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value after the header. */
4279 		alloc = handle_alloc (cfg, klass, TRUE, context_used);
4282 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4283 		ins->opcode = OP_STOREV_MEMBASE;
4285 		EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4286 		res->type = STACK_OBJ;
4288 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4291 		MONO_START_BB (cfg, is_ref_bb);
4293 		/* val is a vtype, so has to load the value manually */
4294 		src_var = get_vreg_to_inst (cfg, val->dreg);
4296 			src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4297 		EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4298 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4299 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4302 		MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: indirect call to Nullable<T>.Box via the gsharedvt info. */
4305 			MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4306 															MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4308 			MonoMethodSignature *box_sig;
4311 			 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4312 			 * construct that method at JIT time, so have to do things by hand.
4314 			box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4315 			box_sig->ret = &mono_defaults.object_class->byval_arg;
4316 			box_sig->param_count = 1;
4317 			box_sig->params [0] = &klass->byval_arg;
4318 			box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4319 			EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4320 			res->type = STACK_OBJ;
4324 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4326 		MONO_START_BB (cfg, end_bb);
/* Default case: plain allocate + store of the value type payload. */
4330 		alloc = handle_alloc (cfg, klass, TRUE, context_used);
4334 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *   Return whether KLASS has at least one (co)variant generic argument that
 *   is a reference type — such classes need the slow, cache-based
 *   isinst/castclass path because variance affects assignability.
 */
4340 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4343 	MonoGenericContainer *container;
4344 	MonoGenericInst *ginst;
/* Instantiated generic: inspect the concrete type arguments. */
4346 	if (klass->generic_class) {
4347 		container = klass->generic_class->container_class->generic_container;
4348 		ginst = klass->generic_class->context.class_inst;
/* Open generic in shared code: inspect the container's own parameters. */
4349 	} else if (klass->generic_container && context_used) {
4350 		container = klass->generic_container;
4351 		ginst = container->context.class_inst;
4356 	for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters. */
4358 		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4360 		type = ginst->type_argv [i];
4361 		if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls may be called directly (see icall_is_direct_callable below). Published once, then read-only. */
4367 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *   Return whether CMETHOD's icall can be invoked with a direct call rather
 *   than through a wrapper. Only whitelisted corlib types (and Math) qualify.
 */
4370 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4372 	/* LLVM on amd64 can't handle calls to non-32 bit addresses */
4373 	if (!direct_icalls_enabled (cfg))
4377 	 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4378 	 * Whitelist a few icalls for now.
/* Build the whitelist on first use; the memory barrier orders the inserts before publication. NOTE(review): two racing threads could each build a table; the loser's table leaks — appears tolerated by design. */
4380 	if (!direct_icall_type_hash) {
4381 		GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4383 		g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4384 		g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4385 		g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4386 		g_hash_table_insert (h, (char*)"Monitor", GUINT_TO_POINTER (1));
4387 		mono_memory_barrier ();
4388 		direct_icall_type_hash = h;
4391 	if (cmethod->klass == mono_defaults.math_class)
4393 	/* No locking needed */
4394 	if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when KLASS needs the complex (helper/cache based) isinst-castclass path: interface, array, Nullable<T>, MarshalByRef, sealed, or generic type/method variable. NOTE(review): macro argument is unparenthesized and evaluated multiple times — only pass simple lvalues. */
4399 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *   Emit a call to the castclass-with-cache marshal wrapper, bracketed by
 *   cast-detail save/reset so --debug=casts failure messages stay accurate.
 */
4402 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4404 	MonoMethod *mono_castclass;
4407 	mono_castclass = mono_marshal_get_castclass_with_cache ();
4409 	save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4410 	res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4411 	reset_cast_details (cfg);
/* Allocate a per-call-site castclass cache index: method index in the high 16 bits, a per-method counter in the low 16. NOTE(review): no visible overflow guard on the counter — assumed < 65536 cast sites per method. */
4417 get_castclass_cache_idx (MonoCompile *cfg)
4419 	/* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4420 	cfg->castclass_cache_index ++;
4421 	return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *   Non-shared-code front end for the cached castclass: pass OBJ, a class
 *   constant, and a per-call-site cache cell to the wrapper.
 */
4425 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4434 	EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* The cache cell is a runtime constant keyed by a unique call-site index. */
4437 	idx = get_castclass_cache_idx (cfg);
4438 	args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4440 	/*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4441 	return emit_castclass_with_cache (cfg, klass, args);
4445 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *   Emit IR for the CIL castclass opcode on SRC to type KLASS. Chooses among:
 *   the cached-castclass wrapper (variant generics / complex classes), an
 *   inlined marshal wrapper (interfaces, MarshalByRef), or inline vtable/
 *   class comparisons, throwing InvalidCastException on mismatch.
 */
4448 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4450 	MonoBasicBlock *is_null_bb;
4451 	int obj_reg = src->dreg;
4452 	int vtable_reg = alloc_preg (cfg);
4454 	MonoInst *klass_inst = NULL, *res;
4456 	context_used = mini_class_check_context_used (cfg, klass);
/* Non-shared + variant generic arguments: use the non-shared cache helper. */
4458 	if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4459 		res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4460 		(*inline_costs) += 2;
/* Non-shared interface/MarshalByRef: inline the castclass marshal wrapper. */
4462 	} else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4463 		MonoMethod *mono_castclass;
4464 		MonoInst *iargs [1];
4467 		mono_castclass = mono_marshal_get_castclass (klass);
4470 		save_cast_details (cfg, klass, src->dreg, TRUE);
4471 		costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4472 							   iargs, ip, cfg->real_offset, TRUE);
4473 		reset_cast_details (cfg);
4474 		CHECK_CFG_EXCEPTION;
4475 		g_assert (costs > 0);
4477 		cfg->real_offset += 5;
4479 		(*inline_costs) += costs;
/* Shared code: complex classes go through the cached wrapper, with the cache cell and class loaded from the rgctx. */
4487 		if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4488 			MonoInst *cache_ins;
4490 			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4495 			/* klass - it's the second element of the cache entry*/
4496 			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4499 			args [2] = cache_ins;
4501 			return emit_castclass_with_cache (cfg, klass, args);
4504 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null objects always pass a castclass. */
4507 	NEW_BBLOCK (cfg, is_null_bb);
4509 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4510 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4512 	save_cast_details (cfg, klass, obj_reg, FALSE);
4514 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4515 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4516 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4518 		int klass_reg = alloc_preg (cfg);
4520 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes admit a single exact-type compare. */
4522 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4523 			/* the remoting code is broken, access the class for now */
4524 			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4525 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4527 					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4528 					cfg->exception_ptr = klass;
4531 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4533 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4534 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4536 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy (mini_emit_castclass_inst). */
4538 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4539 			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4543 	MONO_START_BB (cfg, is_null_bb);
4545 	reset_cast_details (cfg);
4554 * Returns NULL and set the cfg exception on error.
4557 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4560 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4561 int obj_reg = src->dreg;
4562 int vtable_reg = alloc_preg (cfg);
4563 int res_reg = alloc_ireg_ref (cfg);
4564 MonoInst *klass_inst = NULL;
4569 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4570 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4571 MonoInst *cache_ins;
4573 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4578 /* klass - it's the second element of the cache entry*/
4579 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4582 args [2] = cache_ins;
4584 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4587 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4590 NEW_BBLOCK (cfg, is_null_bb);
4591 NEW_BBLOCK (cfg, false_bb);
4592 NEW_BBLOCK (cfg, end_bb);
4594 /* Do the assignment at the beginning, so the other assignment can be if converted */
4595 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4596 ins->type = STACK_OBJ;
4599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4600 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4604 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4605 g_assert (!context_used);
4606 /* the is_null_bb target simply copies the input register to the output */
4607 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4609 int klass_reg = alloc_preg (cfg);
4612 int rank_reg = alloc_preg (cfg);
4613 int eclass_reg = alloc_preg (cfg);
4615 g_assert (!context_used);
4616 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4618 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4619 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4621 if (klass->cast_class == mono_defaults.object_class) {
4622 int parent_reg = alloc_preg (cfg);
4623 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4624 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4625 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4626 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4627 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4628 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4629 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4631 } else if (klass->cast_class == mono_defaults.enum_class) {
4632 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4633 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4634 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4635 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4637 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4638 /* Check that the object is a vector too */
4639 int bounds_reg = alloc_preg (cfg);
4640 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4641 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4645 /* the is_null_bb target simply copies the input register to the output */
4646 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4648 } else if (mono_class_is_nullable (klass)) {
4649 g_assert (!context_used);
4650 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4651 /* the is_null_bb target simply copies the input register to the output */
4652 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4654 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4655 g_assert (!context_used);
4656 /* the remoting code is broken, access the class for now */
4657 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4658 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4660 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4661 cfg->exception_ptr = klass;
4664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4666 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4669 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4670 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4673 /* the is_null_bb target simply copies the input register to the output */
4674 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4679 MONO_START_BB (cfg, false_bb);
4681 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4684 MONO_START_BB (cfg, is_null_bb);
4686 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR implementing the internal CISINST opcode: classify the object in
 * SRC against KLASS and load 0/1/2 into a freshly allocated integer register
 * (see the result enumeration in the comment below). The transparent-proxy
 * paths are only emitted when DISABLE_REMOTING is not defined; otherwise a
 * proxy encountered at runtime aborts via g_error ().
 */
4692 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4694 /* This opcode takes as input an object reference and a class, and returns:
4695 0) if the object is an instance of the class,
4696 1) if the object is not an instance of the class,
4697 2) if the object is a proxy whose type cannot be determined */
4700 #ifndef DISABLE_REMOTING
4701 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4703 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4705 int obj_reg = src->dreg;
4706 int dreg = alloc_ireg (cfg);
4708 #ifndef DISABLE_REMOTING
4709 int klass_reg = alloc_preg (cfg);
4712 NEW_BBLOCK (cfg, true_bb);
4713 NEW_BBLOCK (cfg, false_bb);
4714 NEW_BBLOCK (cfg, end_bb);
4715 #ifndef DISABLE_REMOTING
4716 NEW_BBLOCK (cfg, false2_bb);
4717 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is never an instance: go straight to the "1" result */
4720 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4721 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/*
 * Interface case: try the interface cast first; on failure (with remoting
 * enabled) check whether the object is a transparent proxy carrying
 * custom_type_info before settling on "not an instance".
 */
4723 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4724 #ifndef DISABLE_REMOTING
4725 NEW_BBLOCK (cfg, interface_fail_bb);
4728 tmp_reg = alloc_preg (cfg);
4729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4730 #ifndef DISABLE_REMOTING
4731 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4732 MONO_START_BB (cfg, interface_fail_bb);
4733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy -> definitely not an instance */
4735 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4737 tmp_reg = alloc_preg (cfg);
4738 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom type info: type cannot be determined (result 2) */
4739 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4740 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4742 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
/*
 * Non-interface case: compare class hierarchies; a transparent proxy is
 * tested against its remote proxy_class instead of its own vtable class.
 */
4745 #ifndef DISABLE_REMOTING
4746 tmp_reg = alloc_preg (cfg);
4747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4750 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4751 tmp_reg = alloc_preg (cfg);
4752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4753 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4755 tmp_reg = alloc_preg (cfg);
4756 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4757 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4758 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: failure means "undetermined" (result 2) */
4760 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4761 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4763 MONO_START_BB (cfg, no_proxy_bb);
4765 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4767 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* result 1: not an instance */
4771 MONO_START_BB (cfg, false_bb);
4773 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4774 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* result 2: proxy whose type cannot be determined */
4776 #ifndef DISABLE_REMOTING
4777 MONO_START_BB (cfg, false2_bb);
4779 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4780 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* result 0: the object is an instance of klass */
4783 MONO_START_BB (cfg, true_bb);
4785 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4787 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an I4-typed instruction for the eval stack */
4790 MONO_INST_NEW (cfg, ins, OP_ICONST);
4792 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR implementing the internal CCASTCLASS opcode (castclass with
 * remoting support): cast the object in SRC to KLASS, producing 0 on
 * success and 1 for an undeterminable proxy; all other cases throw
 * InvalidCastException at runtime. Proxy handling is compiled out when
 * DISABLE_REMOTING is defined.
 */
4798 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4800 /* This opcode takes as input an object reference and a class, and returns:
4801 0) if the object is an instance of the class,
4802 1) if the object is a proxy whose type cannot be determined
4803 an InvalidCastException exception is thrown otherwise */
4806 #ifndef DISABLE_REMOTING
4807 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4809 MonoBasicBlock *ok_result_bb;
4811 int obj_reg = src->dreg;
4812 int dreg = alloc_ireg (cfg);
4813 int tmp_reg = alloc_preg (cfg);
4815 #ifndef DISABLE_REMOTING
4816 int klass_reg = alloc_preg (cfg);
4817 NEW_BBLOCK (cfg, end_bb);
4820 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0) */
4822 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4823 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failed cast can produce a descriptive message */
4825 save_cast_details (cfg, klass, obj_reg, FALSE);
4827 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4828 #ifndef DISABLE_REMOTING
4829 NEW_BBLOCK (cfg, interface_fail_bb);
4831 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4832 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4833 MONO_START_BB (cfg, interface_fail_bb);
4834 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy may still pass */
4836 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4838 tmp_reg = alloc_preg (cfg);
4839 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom type info cannot satisfy the cast: throw */
4840 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4841 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: undeterminable, result 1 */
4843 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4844 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4846 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4847 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4848 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
/* Non-interface case: transparent proxies are tested against their
 * remote proxy_class; everything else takes the normal castclass path. */
4851 #ifndef DISABLE_REMOTING
4852 NEW_BBLOCK (cfg, no_proxy_bb);
4854 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4856 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4858 tmp_reg = alloc_preg (cfg);
4859 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4860 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4862 tmp_reg = alloc_preg (cfg);
4863 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4864 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4865 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4867 NEW_BBLOCK (cfg, fail_1_bb);
4869 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy cast could not be decided: result 1 */
4871 MONO_START_BB (cfg, fail_1_bb);
4873 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4874 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4876 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: emit the throwing castclass check */
4878 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4880 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
/* result 0: cast succeeded */
4884 MONO_START_BB (cfg, ok_result_bb);
4886 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4888 #ifndef DISABLE_REMOTING
4889 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an I4-typed instruction for the eval stack */
4893 MONO_INST_NEW (cfg, ins, OP_ICONST);
4895 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit IR for the Enum.HasFlag intrinsic: computes
 * (*enum_this & enum_flag) == enum_flag as a boolean. The underlying enum
 * type selects 32- vs 64-bit register width and opcodes (is_i4 is set by
 * the switch below; full case list not shown here — confirm in full source).
 */
4900 static G_GNUC_UNUSED MonoInst*
4901 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4903 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4904 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4907 switch (enum_type->type) {
4910 #if SIZEOF_REGISTER == 8
4922 MonoInst *load, *and, *cmp, *ceq;
4923 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4924 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4925 int dest_reg = alloc_ireg (cfg);
/* load value; and = value & flag; ceq = (and == flag) */
4927 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4928 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4929 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4930 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4932 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need simpler forms */
4935 load = mono_decompose_opcode (cfg, load);
4936 and = mono_decompose_opcode (cfg, and);
4937 cmp = mono_decompose_opcode (cfg, cmp);
4938 ceq = mono_decompose_opcode (cfg, ceq);
4946  * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS bound to METHOD with
 * receiver TARGET, inlining the work of mono_delegate_ctor (): the target,
 * method, cached-code-slot and invoke_impl fields are all stored directly.
 * VIRTUAL selects virtual-delegate semantics.
 */
4948 static G_GNUC_UNUSED MonoInst*
4949 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4953 gpointer trampoline;
4954 MonoInst *obj, *method_ins, *tramp_ins;
/* Virtual delegates need a virtual-invoke implementation for the Invoke
 * signature; if unavailable, bail out (error path not visible here). */
4958 if (virtual && !cfg->llvm_only) {
4959 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4962 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4966 obj = handle_alloc (cfg, klass, FALSE, mono_class_check_context_used (klass));
/* llvm_only: initialize the delegate through a JIT icall instead of
 * storing fields + trampolines inline */
4970 if (cfg->llvm_only) {
4971 MonoInst *args [16];
4974 * If the method to be called needs an rgctx, we can't fall back to mono_delegate_ctor (), since it might receive
4975 * the address of a gshared method. So use a JIT icall.
4976 * FIXME: Optimize this.
4980 args [2] = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4981 mono_emit_jit_icall (cfg, virtual ? mono_init_delegate_virtual : mono_init_delegate, args);
4986 /* Inline the contents of mono_delegate_ctor */
4988 /* Set target field */
4989 /* Optimize away setting of NULL target */
4990 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4991 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* target is a reference stored into a heap object: needs a write barrier */
4992 if (cfg->gen_write_barriers) {
4993 dreg = alloc_preg (cfg);
4994 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4995 emit_write_barrier (cfg, ptr, target);
4999 /* Set method field */
5000 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
5001 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
5004 * To avoid looking up the compiled code belonging to the target method
5005 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
5006 * store it, and we fill it after the method has been compiled.
5008 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
5009 MonoInst *code_slot_ins;
5012 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the
 * domain lock, allocating the slot on first use */
5014 domain = mono_domain_get ();
5015 mono_domain_lock (domain);
5016 if (!domain_jit_info (domain)->method_code_hash)
5017 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
5018 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
5020 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
5021 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
5023 mono_domain_unlock (domain);
5025 code_slot_ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
5027 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: a patch-info constant under AOT,
 * a concrete trampoline pointer when JITting */
5030 if (cfg->compile_aot) {
5031 MonoDelegateClassMethodPair *del_tramp;
5033 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
5034 del_tramp->klass = klass;
5035 del_tramp->method = context_used ? NULL : method;
5036 del_tramp->is_virtual = virtual;
5037 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
5040 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
5042 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
5043 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
5046 /* Set invoke_impl field */
5048 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-AOT path: tramp_ins points to a MonoDelegateTrampInfo; copy its
 * invoke_impl and method_ptr into the delegate */
5050 dreg = alloc_preg (cfg);
5051 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
5052 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
5054 dreg = alloc_preg (cfg);
5055 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
5056 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
/* Record whether this is a virtual delegate in method_is_virtual */
5059 dreg = alloc_preg (cfg);
5060 MONO_EMIT_NEW_ICONST (cfg, dreg, virtual ? 1 : 0);
5061 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_is_virtual), dreg);
5063 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the call implementing 'newarr' for a multi-dimensional array of
 * RANK dimensions: the dimension sizes are on the stack in SP and are
 * passed to the vararg icall mono_array_new_va () through its wrapper.
 * Returns the call instruction producing the new array.
 */
5069 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
5071 MonoJitICallInfo *info;
5073 /* Need to register the icall so it gets an icall wrapper */
5074 info = mono_get_array_new_va_icall (rank);
5076 cfg->flags |= MONO_CFG_HAS_VARARGS;
5078 /* mono_array_new_va () needs a vararg calling convention */
5079 cfg->disable_llvm = TRUE;
5081 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5082 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5086  * handle_constrained_gsharedvt_call:
5088  *   Handle constrained calls where the receiver is a gsharedvt type.
5089  * Return the instruction representing the call. Set the cfg exception on failure.
5092 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5093 gboolean *ref_emit_widen)
5095 MonoInst *ins = NULL;
5096 gboolean emit_widen = *ref_emit_widen;
5099 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5100 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5101 * pack the arguments into an array, and do the rest of the work in in an icall.
/* Only a restricted set of signatures is supported by the icall:
 * at most one argument, simple enough return types (see conditions). */
5103 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5104 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5105 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5106 MonoInst *args [16];
5109 * This case handles calls to
5110 * - object:ToString()/Equals()/GetHashCode(),
5111 * - System.IComparable<T>:CompareTo()
5112 * - System.IEquatable<T>:Equals ()
5113 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method (through the rgctx when generic-shared) */
5117 if (mono_method_check_context_used (cmethod))
5118 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5120 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5121 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5123 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5124 if (fsig->hasthis && fsig->param_count) {
5125 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5126 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5127 ins->dreg = alloc_preg (cfg);
5128 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5129 MONO_ADD_INS (cfg->cbb, ins);
/* Gsharedvt argument: pass its address plus a flag telling the icall
 * whether to dereference (BOX_TYPE != VTYPE) */
5132 if (mini_is_gsharedvt_type (fsig->params [0])) {
5133 int addr_reg, deref_arg_reg;
5135 ins = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5136 deref_arg_reg = alloc_preg (cfg);
5137 /* deref_arg = BOX_TYPE != MONO_GSHAREDVT_BOX_TYPE_VTYPE */
5138 EMIT_NEW_BIALU_IMM (cfg, args [3], OP_ISUB_IMM, deref_arg_reg, ins->dreg, 1);
5140 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5141 addr_reg = ins->dreg;
5142 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5144 EMIT_NEW_ICONST (cfg, args [3], 0);
5145 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
/* No arguments: pass 0 for both the deref flag and the args array */
5148 EMIT_NEW_ICONST (cfg, args [3], 0);
5149 EMIT_NEW_ICONST (cfg, args [4], 0);
5151 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
/* The icall returns a boxed result; unbox it to match fsig->ret */
5154 if (mini_is_gsharedvt_type (fsig->ret)) {
5155 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5156 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* Load the value out of the box: skip the MonoObject header */
5160 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5161 MONO_ADD_INS (cfg->cbb, add);
5163 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5164 MONO_ADD_INS (cfg->cbb, ins);
5165 /* ins represents the call result */
5168 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5171 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the entry basic block to
 * initialize cfg->got_var, exactly once per compile (guarded by
 * got_var_allocated), plus a dummy use in bb_exit so the variable's
 * liveness spans the whole method.
 */
5180 mono_emit_load_got_addr (MonoCompile *cfg)
5182 MonoInst *getaddr, *dummy_use;
5184 if (!cfg->got_var || cfg->got_var_allocated)
5187 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5188 getaddr->cil_code = cfg->header->code;
5189 getaddr->dreg = cfg->got_var->dreg;
5191 /* Add it to the start of the first bblock */
5192 if (cfg->bb_entry->code) {
5193 getaddr->next = cfg->bb_entry->code;
5194 cfg->bb_entry->code = getaddr;
5197 MONO_ADD_INS (cfg->bb_entry, getaddr);
5199 cfg->got_var_allocated = TRUE;
5202 * Add a dummy use to keep the got_var alive, since real uses might
5203 * only be generated by the back ends.
5204 * Add it to end_bblock, so the variable's lifetime covers the whole
5206 * It would be better to make the usage of the got var explicit in all
5207 * cases when the backend needs it (i.e. calls, throw etc.), so this
5208 * wouldn't be needed.
5210 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5211 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size (bytes) for a method to be considered for inlining.
 * Lazily initialized from the MONO_INLINELIMIT environment variable in
 * mono_method_check_inlining (), defaulting to INLINE_LENGTH_LIMIT. */
5214 static int inline_limit;
5215 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG. The checks below reject inlining for: disabled inlining, excessive
 * inline depth, missing/unsuitable header (runtime, icall, pinvoke),
 * NoInlining/synchronized methods, MarshalByRef classes, over-limit IL
 * size, classes whose cctor cannot be run up front, soft-float R4
 * signatures, and methods on cfg->dont_inline. (Return statements sit on
 * lines not visible here — presumably TRUE when all checks pass.)
 */
5218 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5220 MonoMethodHeaderSummary header;
5222 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5223 MonoMethodSignature *sig = mono_method_signature (method);
5227 if (cfg->disable_inline)
5232 if (cfg->inline_depth > 10)
5235 if (!mono_method_get_header_summary (method, &header))
5238 /*runtime, icall and pinvoke are checked by summary call*/
5239 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5240 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5241 (mono_class_is_marshalbyref (method->klass)) ||
5245 /* also consider num_locals? */
5246 /* Do the size check early to avoid creating vtables */
5247 if (!inline_limit_inited) {
5248 if (g_getenv ("MONO_INLINELIMIT"))
5249 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5251 inline_limit = INLINE_LENGTH_LIMIT;
5252 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit */
5254 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5258 * if we can initialize the class of the method right away, we do,
5259 * otherwise we don't allow inlining if the class needs initialization,
5260 * since it would mean inserting a call to mono_runtime_class_init()
5261 * inside the inlined code
5263 if (!(cfg->opt & MONO_OPT_SHARED)) {
5264 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5265 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5266 vtable = mono_class_vtable (cfg->domain, method->klass);
5269 if (!cfg->compile_aot)
5270 mono_runtime_class_init (vtable);
5271 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5272 if (cfg->run_cctors && method->klass->has_cctor) {
5273 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5274 if (!method->klass->runtime_info)
5275 /* No vtable created yet */
5277 vtable = mono_class_vtable (cfg->domain, method->klass);
5280 /* This makes so that inline cannot trigger */
5281 /* .cctors: too many apps depend on them */
5282 /* running with a specific order... */
5283 if (! vtable->initialized)
5285 mono_runtime_class_init (vtable);
5287 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5288 if (!method->klass->runtime_info)
5289 /* No vtable created yet */
5291 vtable = mono_class_vtable (cfg->domain, method->klass);
5294 if (!vtable->initialized)
5299 * If we're compiling for shared code
5300 * the cctor will need to be run at aot method load time, for example,
5301 * or at the end of the compilation of the inlining method.
5303 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/* Soft-float targets cannot inline methods with R4 in the signature */
5307 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5308 if (mono_arch_is_soft_float ()) {
5310 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5312 for (i = 0; i < sig->param_count; ++i)
5313 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5318 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access on KLASS from METHOD must be
 * preceded by a class-initialization (cctor) check. When JITting (not AOT)
 * an already-initialized vtable needs no check; BeforeFieldInit classes
 * and accesses from within the class itself are also exempt.
 */
5325 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5327 if (!cfg->compile_aot) {
5329 if (vtable->initialized)
5333 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5334 if (cfg->method == method)
5338 if (!mono_class_needs_cctor_run (klass, method))
5341 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5342 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements have type KLASS. BCHECK controls whether a
 * bounds check is emitted. For gsharedvt variable-sized element types the
 * element size is fetched from the rgctx at runtime; on x86/amd64 a single
 * LEA covers the power-of-two element sizes. Returns the address
 * instruction (type STACK_MP).
 */
5349 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5353 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5356 if (mini_is_gsharedvt_variable_klass (klass)) {
5359 mono_class_init (klass);
5360 size = mono_class_array_element_size (klass);
5363 mult_reg = alloc_preg (cfg);
5364 array_reg = arr->dreg;
5365 index_reg = index->dreg;
5367 #if SIZEOF_REGISTER == 8
5368 /* The array reg is 64 bits but the index reg is only 32 */
5369 if (COMPILE_LLVM (cfg)) {
5371 index2_reg = index_reg;
5373 index2_reg = alloc_preg (cfg);
5374 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index to I4 */
5377 if (index->type == STACK_I8) {
5378 index2_reg = alloc_preg (cfg);
5379 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5381 index2_reg = index_reg;
5386 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: one LEA with a shifted index for 1/2/4/8-byte elements */
5388 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5389 if (size == 1 || size == 2 || size == 4 || size == 8) {
5390 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5392 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5393 ins->klass = mono_class_get_element_class (klass);
5394 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (vector) */
5400 add_reg = alloc_ireg_mp (cfg);
5403 MonoInst *rgctx_ins;
/* Variable-sized element: multiply by the runtime element size */
5406 g_assert (cfg->gshared);
5407 context_used = mini_class_check_context_used (cfg, klass);
5408 g_assert (context_used);
5409 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5410 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5414 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5415 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5416 ins->klass = mono_class_get_element_class (klass);
5417 ins->type = STACK_MP;
5418 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX1, INDEX2] of the
 * two-dimensional array ARR with element type KLASS. Reads the per-
 * dimension lower_bound/length from the MonoArrayBounds pair, range-checks
 * both (adjusted) indices, then computes
 *   addr = arr + ((realidx1 * length2 + realidx2) * size) + offsetof (vector).
 * Returns the address instruction (type STACK_MP).
 */
5424 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5426 int bounds_reg = alloc_preg (cfg);
5427 int add_reg = alloc_ireg_mp (cfg);
5428 int mult_reg = alloc_preg (cfg);
5429 int mult2_reg = alloc_preg (cfg);
5430 int low1_reg = alloc_preg (cfg);
5431 int low2_reg = alloc_preg (cfg);
5432 int high1_reg = alloc_preg (cfg);
5433 int high2_reg = alloc_preg (cfg);
5434 int realidx1_reg = alloc_preg (cfg);
5435 int realidx2_reg = alloc_preg (cfg);
5436 int sum_reg = alloc_preg (cfg);
5437 int index1, index2, tmpreg;
5441 mono_class_init (klass);
5442 size = mono_class_array_element_size (klass);
5444 index1 = index_ins1->dreg;
5445 index2 = index_ins2->dreg;
5447 #if SIZEOF_REGISTER == 8
5448 /* The array reg is 64 bits but the index reg is only 32 */
5449 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indices to pointer width */
5452 tmpreg = alloc_preg (cfg);
5453 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5455 tmpreg = alloc_preg (cfg);
5456 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5460 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5464 /* range checking */
5465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5466 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; check against length.
 * The LE_UN comparison also rejects negative realidx values. */
5468 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5469 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5470 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5472 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5473 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5474 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry */
5476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5477 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5478 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5480 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5481 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5482 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Linearize: (realidx1 * length2 + realidx2) * size + data offset */
5484 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5485 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5486 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5487 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5488 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5490 ins->type = STACK_MP;
5492 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for the array Address () intrinsic
 * CMETHOD. Rank 1 uses the inline 1-dim path, rank 2 the inline 2-dim path
 * (when OP_LMUL emulation is off and intrinsics are enabled); otherwise a
 * call to the generated array-address marshal wrapper is emitted. IS_SET
 * indicates the address is for a store (the extra value arg is excluded
 * from the rank computation).
 */
5498 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5502 MonoMethod *addr_method;
5504 MonoClass *eclass = cmethod->klass->element_class;
5506 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5509 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5511 /* emit_ldelema_2 depends on OP_LMUL */
5512 if (!cfg->backend->emulate_mul_div && rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5513 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5516 if (mini_is_gsharedvt_variable_klass (eclass))
/* Fallback: call the rank/element-size specific address wrapper */
5519 element_size = mono_class_array_element_size (eclass);
5520 addr_method = mono_marshal_get_array_address (rank, element_size);
5521 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break IL instruction / Debugger.Break (). */
5526 static MonoBreakPolicy
5527 always_insert_breakpoint (MonoMethod *method)
5529 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed policy callback; replaced via mono_set_break_policy (). */
5532 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5535  * mono_set_break_policy:
5536  * policy_callback: the new callback function
5538  * Allow embedders to decide whether to actually obey breakpoint instructions
5539  * (both break IL instructions and Debugger.Break () method calls), for example
5540  * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5541  * untrusted or semi-trusted code.
5543  * @policy_callback will be called every time a break point instruction needs to
5544  * be inserted with the method argument being the method that calls Debugger.Break()
5545  * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5546  * if it wants the breakpoint to not be effective in the given method.
5547  * #MONO_BREAK_POLICY_ALWAYS is the default.
5550 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5552 if (policy_callback)
5553 break_policy_func = policy_callback;
/* A NULL callback restores the default always-break policy */
5555 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (name misspelled historically; kept because
 * callers elsewhere use this exact spelling)
 *
 *   Consult the installed break policy and return whether a breakpoint
 * should actually be emitted for METHOD. MONO_BREAK_POLICY_ON_DBG is no
 * longer supported and only warns.
 */
5559 should_insert_brekpoint (MonoMethod *method) {
5560 switch (break_policy_func (method)) {
5561 case MONO_BREAK_POLICY_ALWAYS:
5563 case MONO_BREAK_POLICY_NEVER:
5565 case MONO_BREAK_POLICY_ON_DBG:
5566 g_warning ("mdb no longer supported");
5569 g_warning ("Incorrect value returned from break policy callback");
5574 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   ARGS are (array, index, value-address). With IS_SET, the value at
 * args [2] is copied into the array element (with a write barrier for
 * reference elements); otherwise the element is copied out to args [2].
 */
5576 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5578 MonoInst *addr, *store, *load;
5579 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5581 /* the bounds check is already done by the callers */
5582 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5584 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5585 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5586 if (mini_type_is_reference (fsig->params [2]))
5587 emit_write_barrier (cfg, addr, load);
5589 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5590 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5597 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5599 return mini_type_is_reference (&klass->byval_arg);
5603 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5605 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5606 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5607 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5608 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5609 MonoInst *iargs [3];
5612 mono_class_setup_vtable (obj_array);
5613 g_assert (helper->slot);
5615 if (sp [0]->type != STACK_OBJ)
5617 if (sp [2]->type != STACK_OBJ)
5624 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5628 if (mini_is_gsharedvt_variable_klass (klass)) {
5631 // FIXME-VT: OP_ICONST optimization
5632 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5633 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5634 ins->opcode = OP_STOREV_MEMBASE;
5635 } else if (sp [1]->opcode == OP_ICONST) {
5636 int array_reg = sp [0]->dreg;
5637 int index_reg = sp [1]->dreg;
5638 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5641 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5642 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5644 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5645 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5646 if (generic_class_is_reference_type (cfg, klass))
5647 emit_write_barrier (cfg, addr, sp [2]);
5654 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5659 eklass = mono_class_from_mono_type (fsig->params [2]);
5661 eklass = mono_class_from_mono_type (fsig->ret);
5664 return emit_array_store (cfg, eklass, args, FALSE);
5666 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5667 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5673 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
5676 int param_size, return_size;
5678 param_klass = mono_class_from_mono_type (mini_get_underlying_type (¶m_klass->byval_arg));
5679 return_klass = mono_class_from_mono_type (mini_get_underlying_type (&return_klass->byval_arg));
5681 if (cfg->verbose_level > 3)
5682 printf ("[UNSAFE-MOV-INTRISIC] %s <- %s\n", return_klass->name, param_klass->name);
5684 //Don't allow mixing reference types with value types
5685 if (param_klass->valuetype != return_klass->valuetype) {
5686 if (cfg->verbose_level > 3)
5687 printf ("[UNSAFE-MOV-INTRISIC]\tone of the args is a valuetype and the other is not\n");
5691 if (!param_klass->valuetype) {
5692 if (cfg->verbose_level > 3)
5693 printf ("[UNSAFE-MOV-INTRISIC]\targs are reference types\n");
5698 if (param_klass->has_references || return_klass->has_references)
5701 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5702 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5703 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg))) {
5704 if (cfg->verbose_level > 3)
5705 printf ("[UNSAFE-MOV-INTRISIC]\tmixing structs and scalars\n");
5709 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5710 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8) {
5711 if (cfg->verbose_level > 3)
5712 printf ("[UNSAFE-MOV-INTRISIC]\tfloat or double are not supported\n");
5716 param_size = mono_class_value_size (param_klass, &align);
5717 return_size = mono_class_value_size (return_klass, &align);
5719 //We can do it if sizes match
5720 if (param_size == return_size) {
5721 if (cfg->verbose_level > 3)
5722 printf ("[UNSAFE-MOV-INTRISIC]\tsame size\n");
5726 //No simple way to handle struct if sizes don't match
5727 if (MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg)) {
5728 if (cfg->verbose_level > 3)
5729 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch and type is a struct\n");
5734 * Same reg size category.
5735 * A quick note on why we don't require widening here.
5736 * The intrinsic is "R Array.UnsafeMov<S,R> (S s)".
5738 * Since the source value comes from a function argument, the JIT will already have
5739 * the value in a VREG and performed any widening needed before (say, when loading from a field).
5741 if (param_size <= 4 && return_size <= 4) {
5742 if (cfg->verbose_level > 3)
5743 printf ("[UNSAFE-MOV-INTRISIC]\tsize mismatch but both are of the same reg class\n");
5751 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5753 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5754 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5756 //Valuetypes that are semantically equivalent or numbers than can be widened to
5757 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5760 //Arrays of valuetypes that are semantically equivalent
5761 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
5768 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5770 #ifdef MONO_ARCH_SIMD_INTRINSICS
5771 MonoInst *ins = NULL;
5773 if (cfg->opt & MONO_OPT_SIMD) {
5774 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5780 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5784 emit_memory_barrier (MonoCompile *cfg, int kind)
5786 MonoInst *ins = NULL;
5787 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5788 MONO_ADD_INS (cfg->cbb, ins);
5789 ins->backend.memory_barrier_kind = kind;
5795 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5797 MonoInst *ins = NULL;
5800 /* The LLVM backend supports these intrinsics */
5801 if (cmethod->klass == mono_defaults.math_class) {
5802 if (strcmp (cmethod->name, "Sin") == 0) {
5804 } else if (strcmp (cmethod->name, "Cos") == 0) {
5806 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5808 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5812 if (opcode && fsig->param_count == 1) {
5813 MONO_INST_NEW (cfg, ins, opcode);
5814 ins->type = STACK_R8;
5815 ins->dreg = mono_alloc_freg (cfg);
5816 ins->sreg1 = args [0]->dreg;
5817 MONO_ADD_INS (cfg->cbb, ins);
5821 if (cfg->opt & MONO_OPT_CMOV) {
5822 if (strcmp (cmethod->name, "Min") == 0) {
5823 if (fsig->params [0]->type == MONO_TYPE_I4)
5825 if (fsig->params [0]->type == MONO_TYPE_U4)
5826 opcode = OP_IMIN_UN;
5827 else if (fsig->params [0]->type == MONO_TYPE_I8)
5829 else if (fsig->params [0]->type == MONO_TYPE_U8)
5830 opcode = OP_LMIN_UN;
5831 } else if (strcmp (cmethod->name, "Max") == 0) {
5832 if (fsig->params [0]->type == MONO_TYPE_I4)
5834 if (fsig->params [0]->type == MONO_TYPE_U4)
5835 opcode = OP_IMAX_UN;
5836 else if (fsig->params [0]->type == MONO_TYPE_I8)
5838 else if (fsig->params [0]->type == MONO_TYPE_U8)
5839 opcode = OP_LMAX_UN;
5843 if (opcode && fsig->param_count == 2) {
5844 MONO_INST_NEW (cfg, ins, opcode);
5845 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5846 ins->dreg = mono_alloc_ireg (cfg);
5847 ins->sreg1 = args [0]->dreg;
5848 ins->sreg2 = args [1]->dreg;
5849 MONO_ADD_INS (cfg->cbb, ins);
5857 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5859 if (cmethod->klass == mono_defaults.array_class) {
5860 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5861 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5862 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5863 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5864 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5865 return emit_array_unsafe_mov (cfg, fsig, args);
5872 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5874 MonoInst *ins = NULL;
5876 static MonoClass *runtime_helpers_class = NULL;
5877 if (! runtime_helpers_class)
5878 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5879 "System.Runtime.CompilerServices", "RuntimeHelpers");
5881 if (cmethod->klass == mono_defaults.string_class) {
5882 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5883 int dreg = alloc_ireg (cfg);
5884 int index_reg = alloc_preg (cfg);
5885 int add_reg = alloc_preg (cfg);
5887 #if SIZEOF_REGISTER == 8
5888 /* The array reg is 64 bits but the index reg is only 32 */
5889 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5891 index_reg = args [1]->dreg;
5893 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5895 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5896 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5897 add_reg = ins->dreg;
5898 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5901 int mult_reg = alloc_preg (cfg);
5902 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5903 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5904 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5905 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5907 type_from_op (cfg, ins, NULL, NULL);
5909 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5910 int dreg = alloc_ireg (cfg);
5911 /* Decompose later to allow more optimizations */
5912 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5913 ins->type = STACK_I4;
5914 ins->flags |= MONO_INST_FAULT;
5915 cfg->cbb->has_array_access = TRUE;
5916 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5921 } else if (cmethod->klass == mono_defaults.object_class) {
5923 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5924 int dreg = alloc_ireg_ref (cfg);
5925 int vt_reg = alloc_preg (cfg);
5926 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5927 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5928 type_from_op (cfg, ins, NULL, NULL);
5931 } else if (!cfg->backend->emulate_mul_div && strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5932 int dreg = alloc_ireg (cfg);
5933 int t1 = alloc_ireg (cfg);
5935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5936 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5937 ins->type = STACK_I4;
5940 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5941 MONO_INST_NEW (cfg, ins, OP_NOP);
5942 MONO_ADD_INS (cfg->cbb, ins);
5946 } else if (cmethod->klass == mono_defaults.array_class) {
5947 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5948 return emit_array_generic_access (cfg, fsig, args, FALSE);
5949 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5950 return emit_array_generic_access (cfg, fsig, args, TRUE);
5952 #ifndef MONO_BIG_ARRAYS
5954 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5957 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5958 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5959 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5960 int dreg = alloc_ireg (cfg);
5961 int bounds_reg = alloc_ireg_mp (cfg);
5962 MonoBasicBlock *end_bb, *szarray_bb;
5963 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5965 NEW_BBLOCK (cfg, end_bb);
5966 NEW_BBLOCK (cfg, szarray_bb);
5968 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5969 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5970 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5971 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5972 /* Non-szarray case */
5974 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5975 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5977 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5978 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5979 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5980 MONO_START_BB (cfg, szarray_bb);
5983 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5984 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5986 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5987 MONO_START_BB (cfg, end_bb);
5989 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5990 ins->type = STACK_I4;
5996 if (cmethod->name [0] != 'g')
5999 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
6000 int dreg = alloc_ireg (cfg);
6001 int vtable_reg = alloc_preg (cfg);
6002 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
6003 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
6004 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
6005 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
6006 type_from_op (cfg, ins, NULL, NULL);
6009 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
6010 int dreg = alloc_ireg (cfg);
6012 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
6013 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
6014 type_from_op (cfg, ins, NULL, NULL);
6019 } else if (cmethod->klass == runtime_helpers_class) {
6021 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
6022 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
6026 } else if (cmethod->klass == mono_defaults.thread_class) {
6027 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
6028 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
6029 MONO_ADD_INS (cfg->cbb, ins);
6031 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
6032 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6033 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
6035 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6037 if (fsig->params [0]->type == MONO_TYPE_I1)
6038 opcode = OP_LOADI1_MEMBASE;
6039 else if (fsig->params [0]->type == MONO_TYPE_U1)
6040 opcode = OP_LOADU1_MEMBASE;
6041 else if (fsig->params [0]->type == MONO_TYPE_I2)
6042 opcode = OP_LOADI2_MEMBASE;
6043 else if (fsig->params [0]->type == MONO_TYPE_U2)
6044 opcode = OP_LOADU2_MEMBASE;
6045 else if (fsig->params [0]->type == MONO_TYPE_I4)
6046 opcode = OP_LOADI4_MEMBASE;
6047 else if (fsig->params [0]->type == MONO_TYPE_U4)
6048 opcode = OP_LOADU4_MEMBASE;
6049 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6050 opcode = OP_LOADI8_MEMBASE;
6051 else if (fsig->params [0]->type == MONO_TYPE_R4)
6052 opcode = OP_LOADR4_MEMBASE;
6053 else if (fsig->params [0]->type == MONO_TYPE_R8)
6054 opcode = OP_LOADR8_MEMBASE;
6055 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6056 opcode = OP_LOAD_MEMBASE;
6059 MONO_INST_NEW (cfg, ins, opcode);
6060 ins->inst_basereg = args [0]->dreg;
6061 ins->inst_offset = 0;
6062 MONO_ADD_INS (cfg->cbb, ins);
6064 switch (fsig->params [0]->type) {
6071 ins->dreg = mono_alloc_ireg (cfg);
6072 ins->type = STACK_I4;
6076 ins->dreg = mono_alloc_lreg (cfg);
6077 ins->type = STACK_I8;
6081 ins->dreg = mono_alloc_ireg (cfg);
6082 #if SIZEOF_REGISTER == 8
6083 ins->type = STACK_I8;
6085 ins->type = STACK_I4;
6090 ins->dreg = mono_alloc_freg (cfg);
6091 ins->type = STACK_R8;
6094 g_assert (mini_type_is_reference (fsig->params [0]));
6095 ins->dreg = mono_alloc_ireg_ref (cfg);
6096 ins->type = STACK_OBJ;
6100 if (opcode == OP_LOADI8_MEMBASE)
6101 ins = mono_decompose_opcode (cfg, ins);
6103 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
6107 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
6109 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6111 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
6112 opcode = OP_STOREI1_MEMBASE_REG;
6113 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
6114 opcode = OP_STOREI2_MEMBASE_REG;
6115 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
6116 opcode = OP_STOREI4_MEMBASE_REG;
6117 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6118 opcode = OP_STOREI8_MEMBASE_REG;
6119 else if (fsig->params [0]->type == MONO_TYPE_R4)
6120 opcode = OP_STORER4_MEMBASE_REG;
6121 else if (fsig->params [0]->type == MONO_TYPE_R8)
6122 opcode = OP_STORER8_MEMBASE_REG;
6123 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6124 opcode = OP_STORE_MEMBASE_REG;
6127 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6129 MONO_INST_NEW (cfg, ins, opcode);
6130 ins->sreg1 = args [1]->dreg;
6131 ins->inst_destbasereg = args [0]->dreg;
6132 ins->inst_offset = 0;
6133 MONO_ADD_INS (cfg->cbb, ins);
6135 if (opcode == OP_STOREI8_MEMBASE_REG)
6136 ins = mono_decompose_opcode (cfg, ins);
6141 } else if (cmethod->klass->image == mono_defaults.corlib &&
6142 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6143 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6146 #if SIZEOF_REGISTER == 8
6147 if (!cfg->llvm_only && strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6148 if (!cfg->llvm_only && mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6149 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6150 ins->dreg = mono_alloc_preg (cfg);
6151 ins->sreg1 = args [0]->dreg;
6152 ins->type = STACK_I8;
6153 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6154 MONO_ADD_INS (cfg->cbb, ins);
6158 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6160 /* 64 bit reads are already atomic */
6161 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6162 load_ins->dreg = mono_alloc_preg (cfg);
6163 load_ins->inst_basereg = args [0]->dreg;
6164 load_ins->inst_offset = 0;
6165 load_ins->type = STACK_I8;
6166 MONO_ADD_INS (cfg->cbb, load_ins);
6168 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6175 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6176 MonoInst *ins_iconst;
6179 if (fsig->params [0]->type == MONO_TYPE_I4) {
6180 opcode = OP_ATOMIC_ADD_I4;
6181 cfg->has_atomic_add_i4 = TRUE;
6183 #if SIZEOF_REGISTER == 8
6184 else if (fsig->params [0]->type == MONO_TYPE_I8)
6185 opcode = OP_ATOMIC_ADD_I8;
6188 if (!mono_arch_opcode_supported (opcode))
6190 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6191 ins_iconst->inst_c0 = 1;
6192 ins_iconst->dreg = mono_alloc_ireg (cfg);
6193 MONO_ADD_INS (cfg->cbb, ins_iconst);
6195 MONO_INST_NEW (cfg, ins, opcode);
6196 ins->dreg = mono_alloc_ireg (cfg);
6197 ins->inst_basereg = args [0]->dreg;
6198 ins->inst_offset = 0;
6199 ins->sreg2 = ins_iconst->dreg;
6200 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6201 MONO_ADD_INS (cfg->cbb, ins);
6203 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6204 MonoInst *ins_iconst;
6207 if (fsig->params [0]->type == MONO_TYPE_I4) {
6208 opcode = OP_ATOMIC_ADD_I4;
6209 cfg->has_atomic_add_i4 = TRUE;
6211 #if SIZEOF_REGISTER == 8
6212 else if (fsig->params [0]->type == MONO_TYPE_I8)
6213 opcode = OP_ATOMIC_ADD_I8;
6216 if (!mono_arch_opcode_supported (opcode))
6218 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6219 ins_iconst->inst_c0 = -1;
6220 ins_iconst->dreg = mono_alloc_ireg (cfg);
6221 MONO_ADD_INS (cfg->cbb, ins_iconst);
6223 MONO_INST_NEW (cfg, ins, opcode);
6224 ins->dreg = mono_alloc_ireg (cfg);
6225 ins->inst_basereg = args [0]->dreg;
6226 ins->inst_offset = 0;
6227 ins->sreg2 = ins_iconst->dreg;
6228 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6229 MONO_ADD_INS (cfg->cbb, ins);
6231 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6234 if (fsig->params [0]->type == MONO_TYPE_I4) {
6235 opcode = OP_ATOMIC_ADD_I4;
6236 cfg->has_atomic_add_i4 = TRUE;
6238 #if SIZEOF_REGISTER == 8
6239 else if (fsig->params [0]->type == MONO_TYPE_I8)
6240 opcode = OP_ATOMIC_ADD_I8;
6243 if (!mono_arch_opcode_supported (opcode))
6245 MONO_INST_NEW (cfg, ins, opcode);
6246 ins->dreg = mono_alloc_ireg (cfg);
6247 ins->inst_basereg = args [0]->dreg;
6248 ins->inst_offset = 0;
6249 ins->sreg2 = args [1]->dreg;
6250 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6251 MONO_ADD_INS (cfg->cbb, ins);
6254 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6255 MonoInst *f2i = NULL, *i2f;
6256 guint32 opcode, f2i_opcode, i2f_opcode;
6257 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6258 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6260 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6261 fsig->params [0]->type == MONO_TYPE_R4) {
6262 opcode = OP_ATOMIC_EXCHANGE_I4;
6263 f2i_opcode = OP_MOVE_F_TO_I4;
6264 i2f_opcode = OP_MOVE_I4_TO_F;
6265 cfg->has_atomic_exchange_i4 = TRUE;
6267 #if SIZEOF_REGISTER == 8
6269 fsig->params [0]->type == MONO_TYPE_I8 ||
6270 fsig->params [0]->type == MONO_TYPE_R8 ||
6271 fsig->params [0]->type == MONO_TYPE_I) {
6272 opcode = OP_ATOMIC_EXCHANGE_I8;
6273 f2i_opcode = OP_MOVE_F_TO_I8;
6274 i2f_opcode = OP_MOVE_I8_TO_F;
6277 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6278 opcode = OP_ATOMIC_EXCHANGE_I4;
6279 cfg->has_atomic_exchange_i4 = TRUE;
6285 if (!mono_arch_opcode_supported (opcode))
6289 /* TODO: Decompose these opcodes instead of bailing here. */
6290 if (COMPILE_SOFT_FLOAT (cfg))
6293 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6294 f2i->dreg = mono_alloc_ireg (cfg);
6295 f2i->sreg1 = args [1]->dreg;
6296 if (f2i_opcode == OP_MOVE_F_TO_I4)
6297 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6298 MONO_ADD_INS (cfg->cbb, f2i);
6301 MONO_INST_NEW (cfg, ins, opcode);
6302 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6303 ins->inst_basereg = args [0]->dreg;
6304 ins->inst_offset = 0;
6305 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6306 MONO_ADD_INS (cfg->cbb, ins);
6308 switch (fsig->params [0]->type) {
6310 ins->type = STACK_I4;
6313 ins->type = STACK_I8;
6316 #if SIZEOF_REGISTER == 8
6317 ins->type = STACK_I8;
6319 ins->type = STACK_I4;
6324 ins->type = STACK_R8;
6327 g_assert (mini_type_is_reference (fsig->params [0]));
6328 ins->type = STACK_OBJ;
6333 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6334 i2f->dreg = mono_alloc_freg (cfg);
6335 i2f->sreg1 = ins->dreg;
6336 i2f->type = STACK_R8;
6337 if (i2f_opcode == OP_MOVE_I4_TO_F)
6338 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6339 MONO_ADD_INS (cfg->cbb, i2f);
6344 if (cfg->gen_write_barriers && is_ref)
6345 emit_write_barrier (cfg, args [0], args [1]);
6347 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6348 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6349 guint32 opcode, f2i_opcode, i2f_opcode;
6350 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6351 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6353 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6354 fsig->params [1]->type == MONO_TYPE_R4) {
6355 opcode = OP_ATOMIC_CAS_I4;
6356 f2i_opcode = OP_MOVE_F_TO_I4;
6357 i2f_opcode = OP_MOVE_I4_TO_F;
6358 cfg->has_atomic_cas_i4 = TRUE;
6360 #if SIZEOF_REGISTER == 8
6362 fsig->params [1]->type == MONO_TYPE_I8 ||
6363 fsig->params [1]->type == MONO_TYPE_R8 ||
6364 fsig->params [1]->type == MONO_TYPE_I) {
6365 opcode = OP_ATOMIC_CAS_I8;
6366 f2i_opcode = OP_MOVE_F_TO_I8;
6367 i2f_opcode = OP_MOVE_I8_TO_F;
6370 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6371 opcode = OP_ATOMIC_CAS_I4;
6372 cfg->has_atomic_cas_i4 = TRUE;
6378 if (!mono_arch_opcode_supported (opcode))
6382 /* TODO: Decompose these opcodes instead of bailing here. */
6383 if (COMPILE_SOFT_FLOAT (cfg))
6386 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6387 f2i_new->dreg = mono_alloc_ireg (cfg);
6388 f2i_new->sreg1 = args [1]->dreg;
6389 if (f2i_opcode == OP_MOVE_F_TO_I4)
6390 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6391 MONO_ADD_INS (cfg->cbb, f2i_new);
6393 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6394 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6395 f2i_cmp->sreg1 = args [2]->dreg;
6396 if (f2i_opcode == OP_MOVE_F_TO_I4)
6397 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6398 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6401 MONO_INST_NEW (cfg, ins, opcode);
6402 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6403 ins->sreg1 = args [0]->dreg;
6404 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6405 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6406 MONO_ADD_INS (cfg->cbb, ins);
6408 switch (fsig->params [1]->type) {
6410 ins->type = STACK_I4;
6413 ins->type = STACK_I8;
6416 #if SIZEOF_REGISTER == 8
6417 ins->type = STACK_I8;
6419 ins->type = STACK_I4;
6423 ins->type = cfg->r4_stack_type;
6426 ins->type = STACK_R8;
6429 g_assert (mini_type_is_reference (fsig->params [1]));
6430 ins->type = STACK_OBJ;
6435 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6436 i2f->dreg = mono_alloc_freg (cfg);
6437 i2f->sreg1 = ins->dreg;
6438 i2f->type = STACK_R8;
6439 if (i2f_opcode == OP_MOVE_I4_TO_F)
6440 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6441 MONO_ADD_INS (cfg->cbb, i2f);
6446 if (cfg->gen_write_barriers && is_ref)
6447 emit_write_barrier (cfg, args [0], args [1]);
6449 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6450 fsig->params [1]->type == MONO_TYPE_I4) {
6451 MonoInst *cmp, *ceq;
6453 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6456 /* int32 r = CAS (location, value, comparand); */
6457 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6458 ins->dreg = alloc_ireg (cfg);
6459 ins->sreg1 = args [0]->dreg;
6460 ins->sreg2 = args [1]->dreg;
6461 ins->sreg3 = args [2]->dreg;
6462 ins->type = STACK_I4;
6463 MONO_ADD_INS (cfg->cbb, ins);
6465 /* bool result = r == comparand; */
6466 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6467 cmp->sreg1 = ins->dreg;
6468 cmp->sreg2 = args [2]->dreg;
6469 cmp->type = STACK_I4;
6470 MONO_ADD_INS (cfg->cbb, cmp);
6472 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6473 ceq->dreg = alloc_ireg (cfg);
6474 ceq->type = STACK_I4;
6475 MONO_ADD_INS (cfg->cbb, ceq);
6477 /* *success = result; */
6478 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6480 cfg->has_atomic_cas_i4 = TRUE;
6482 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6483 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6487 } else if (cmethod->klass->image == mono_defaults.corlib &&
6488 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6489 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6492 if (!cfg->llvm_only && !strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6494 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6495 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6497 if (fsig->params [0]->type == MONO_TYPE_I1)
6498 opcode = OP_ATOMIC_LOAD_I1;
6499 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6500 opcode = OP_ATOMIC_LOAD_U1;
6501 else if (fsig->params [0]->type == MONO_TYPE_I2)
6502 opcode = OP_ATOMIC_LOAD_I2;
6503 else if (fsig->params [0]->type == MONO_TYPE_U2)
6504 opcode = OP_ATOMIC_LOAD_U2;
6505 else if (fsig->params [0]->type == MONO_TYPE_I4)
6506 opcode = OP_ATOMIC_LOAD_I4;
6507 else if (fsig->params [0]->type == MONO_TYPE_U4)
6508 opcode = OP_ATOMIC_LOAD_U4;
6509 else if (fsig->params [0]->type == MONO_TYPE_R4)
6510 opcode = OP_ATOMIC_LOAD_R4;
6511 else if (fsig->params [0]->type == MONO_TYPE_R8)
6512 opcode = OP_ATOMIC_LOAD_R8;
6513 #if SIZEOF_REGISTER == 8
6514 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6515 opcode = OP_ATOMIC_LOAD_I8;
6516 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6517 opcode = OP_ATOMIC_LOAD_U8;
6519 else if (fsig->params [0]->type == MONO_TYPE_I)
6520 opcode = OP_ATOMIC_LOAD_I4;
6521 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6522 opcode = OP_ATOMIC_LOAD_U4;
6526 if (!mono_arch_opcode_supported (opcode))
6529 MONO_INST_NEW (cfg, ins, opcode);
6530 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6531 ins->sreg1 = args [0]->dreg;
6532 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6533 MONO_ADD_INS (cfg->cbb, ins);
6535 switch (fsig->params [0]->type) {
6536 case MONO_TYPE_BOOLEAN:
6543 ins->type = STACK_I4;
6547 ins->type = STACK_I8;
6551 #if SIZEOF_REGISTER == 8
6552 ins->type = STACK_I8;
6554 ins->type = STACK_I4;
6558 ins->type = cfg->r4_stack_type;
6561 ins->type = STACK_R8;
6564 g_assert (mini_type_is_reference (fsig->params [0]));
6565 ins->type = STACK_OBJ;
6571 if (!cfg->llvm_only && !strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6573 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6575 if (fsig->params [0]->type == MONO_TYPE_I1)
6576 opcode = OP_ATOMIC_STORE_I1;
6577 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6578 opcode = OP_ATOMIC_STORE_U1;
6579 else if (fsig->params [0]->type == MONO_TYPE_I2)
6580 opcode = OP_ATOMIC_STORE_I2;
6581 else if (fsig->params [0]->type == MONO_TYPE_U2)
6582 opcode = OP_ATOMIC_STORE_U2;
6583 else if (fsig->params [0]->type == MONO_TYPE_I4)
6584 opcode = OP_ATOMIC_STORE_I4;
6585 else if (fsig->params [0]->type == MONO_TYPE_U4)
6586 opcode = OP_ATOMIC_STORE_U4;
6587 else if (fsig->params [0]->type == MONO_TYPE_R4)
6588 opcode = OP_ATOMIC_STORE_R4;
6589 else if (fsig->params [0]->type == MONO_TYPE_R8)
6590 opcode = OP_ATOMIC_STORE_R8;
6591 #if SIZEOF_REGISTER == 8
6592 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6593 opcode = OP_ATOMIC_STORE_I8;
6594 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6595 opcode = OP_ATOMIC_STORE_U8;
6597 else if (fsig->params [0]->type == MONO_TYPE_I)
6598 opcode = OP_ATOMIC_STORE_I4;
6599 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6600 opcode = OP_ATOMIC_STORE_U4;
6604 if (!mono_arch_opcode_supported (opcode))
6607 MONO_INST_NEW (cfg, ins, opcode);
6608 ins->dreg = args [0]->dreg;
6609 ins->sreg1 = args [1]->dreg;
6610 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6611 MONO_ADD_INS (cfg->cbb, ins);
6613 if (cfg->gen_write_barriers && is_ref)
6614 emit_write_barrier (cfg, args [0], args [1]);
6620 } else if (cmethod->klass->image == mono_defaults.corlib &&
6621 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6622 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6623 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6624 if (should_insert_brekpoint (cfg->method)) {
6625 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6627 MONO_INST_NEW (cfg, ins, OP_NOP);
6628 MONO_ADD_INS (cfg->cbb, ins);
6632 } else if (cmethod->klass->image == mono_defaults.corlib &&
6633 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6634 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6635 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6637 EMIT_NEW_ICONST (cfg, ins, 1);
6639 EMIT_NEW_ICONST (cfg, ins, 0);
6642 } else if (cmethod->klass->image == mono_defaults.corlib &&
6643 (strcmp (cmethod->klass->name_space, "System.Reflection") == 0) &&
6644 (strcmp (cmethod->klass->name, "Assembly") == 0)) {
6645 if (cfg->llvm_only && !strcmp (cmethod->name, "GetExecutingAssembly")) {
6646 /* No stack walks are currently available, so implement this as an intrinsic */
6647 MonoInst *assembly_ins;
6649 EMIT_NEW_AOTCONST (cfg, assembly_ins, MONO_PATCH_INFO_IMAGE, cfg->method->klass->image);
6650 ins = mono_emit_jit_icall (cfg, mono_get_assembly_object, &assembly_ins);
6653 } else if (cmethod->klass == mono_defaults.math_class) {
6655 * There is general branchless code for Min/Max, but it does not work for
6657 * http://everything2.com/?node_id=1051618
6659 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6660 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6661 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6662 !strcmp (cmethod->klass->name, "Selector")) ||
6663 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6664 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6665 !strcmp (cmethod->klass->name, "Selector"))
6667 if (cfg->backend->have_objc_get_selector &&
6668 !strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6669 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6672 MonoJumpInfoToken *ji;
6675 cfg->disable_llvm = TRUE;
6677 if (args [0]->opcode == OP_GOT_ENTRY) {
6678 pi = args [0]->inst_p1;
6679 g_assert (pi->opcode == OP_PATCH_INFO);
6680 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6683 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6684 ji = args [0]->inst_p0;
6687 NULLIFY_INS (args [0]);
6690 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6691 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6692 ins->dreg = mono_alloc_ireg (cfg);
6694 ins->inst_p0 = mono_string_to_utf8 (s);
6695 MONO_ADD_INS (cfg->cbb, ins);
6700 #ifdef MONO_ARCH_SIMD_INTRINSICS
6701 if (cfg->opt & MONO_OPT_SIMD) {
6702 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6708 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6712 if (COMPILE_LLVM (cfg)) {
6713 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6718 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6722 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected internal calls to managed implementations.
 * Currently only String.InternalAllocateStr is handled: when allocation
 * profiling is off and shared (domain-neutral) code is not being
 * generated, it is replaced with a call to the GC's managed allocator.
 */
6725 inline static MonoInst*
6726 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6727 MonoMethodSignature *signature, MonoInst **args, MonoInst *this_ins)
6729 if (method->klass == mono_defaults.string_class) {
6730 /* managed string allocation support */
6731 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6732 MonoInst *iargs [2];
6733 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6734 MonoMethod *managed_alloc = NULL;
6736 g_assert (vtable); /* Should not fail since it is System.String */
6737 #ifndef MONO_CROSS_COMPILE
6738 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* iargs: [0] = vtable of System.String, [1] = requested length */
6742 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6743 iargs [1] = args [0];
6744 return mono_emit_method_call (cfg, managed_alloc, iargs, this_ins);
/*
 * mono_save_args:
 *
 *   Save the values on the stack SP into newly created inline-argument
 * variables (cfg->args), one per parameter (plus 'this' when present),
 * so the inlined body can reference them as ordinary arguments.
 */
6751 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6753 MonoInst *store, *temp;
6756 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the implicit 'this' slot the static type is taken from the stack entry */
6757 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6760 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6761 * would be different than the MonoInst's used to represent arguments, and
6762 * the ldelema implementation can't deal with that.
6763 * Solution: When ldelema is used on an inline argument, create a var for
6764 * it, emit ldelema on that var, and emit the saving code below in
6765 * inline_method () if needed.
6767 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6768 cfg->args [i] = temp;
6769 /* This uses cfg->args [i] which is set by the preceding line */
6770 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6771 store->cil_code = sp [0]->cil_code;
6776 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6777 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6779 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debug helper: only allow inlining of callees whose full name matches
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 * The env var is read once and cached in a static.
 */
6781 check_inline_called_method_name_limit (MonoMethod *called_method)
6784 static const char *limit = NULL;
6786 if (limit == NULL) {
6787 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6789 if (limit_string != NULL)
6790 limit = limit_string;
6795 if (limit [0] != '\0') {
6796 char *called_method_name = mono_method_full_name (called_method, TRUE);
6798 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6799 g_free (called_method_name);
6801 //return (strncmp_result <= 0);
6802 return (strncmp_result == 0);
6809 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debug helper: only allow inlining when the CALLER's full name matches
 * the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var.
 * Mirrors check_inline_called_method_name_limit () above.
 */
6811 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6814 static const char *limit = NULL;
6816 if (limit == NULL) {
6817 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6818 if (limit_string != NULL) {
6819 limit = limit_string;
6825 if (limit [0] != '\0') {
6826 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6828 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6829 g_free (caller_method_name);
6831 //return (strncmp_result <= 0);
6832 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes register DREG to the zero/default value of
 * RTYPE: NULL for references, 0 for integers, 0.0 for floats (the float
 * constants must be static so the emitted *CONST instructions can point
 * at them), and VZERO for value types / gsharedvt type variables.
 */
6840 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6842 static double r8_0 = 0.0;
6843 static float r4_0 = 0.0;
6847 rtype = mini_get_underlying_type (rtype);
6851 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6852 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6853 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6854 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6855 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6856 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6857 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6858 ins->type = STACK_R4;
6859 ins->inst_p0 = (void*)&r4_0;
6861 MONO_ADD_INS (cfg->cbb, ins);
6862 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6863 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6864 ins->type = STACK_R8;
6865 ins->inst_p0 = (void*)&r8_0;
6867 MONO_ADD_INS (cfg->cbb, ins);
6868 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6869 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6870 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6871 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6872 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback: treat anything else as a pointer-sized NULL */
6874 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emit OP_DUMMY_* placeholder instructions
 * which keep the IR/SSA form valid without generating real code.
 * Falls back to a real initialization for types with no dummy opcode.
 */
6879 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6883 rtype = mini_get_underlying_type (rtype);
6887 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6888 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6889 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6890 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6891 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6892 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6893 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6894 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6895 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6896 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6897 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6898 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6899 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6900 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6902 emit_init_rvar (cfg, dreg, rtype);
6906 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE. Under soft-float the
 * value is first materialized in a fresh register and then stored to the
 * local, so the soft-float decomposition sees a proper store.
 */
6908 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6910 MonoInst *var = cfg->locals [local];
6911 if (COMPILE_SOFT_FLOAT (cfg)) {
6913 int reg = alloc_dreg (cfg, var->type);
6914 emit_init_rvar (cfg, reg, type);
6915 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6918 emit_init_rvar (cfg, var->dreg, type);
6920 emit_dummy_init_rvar (cfg, var->dreg, type);
6927 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD into the current compilation at the call site
 * described by IP/SP/REAL_OFFSET. The caller's per-method state on CFG
 * (locals, args, bblock maps, generic context, ...) is saved, replaced
 * for the duration of the recursive mono_method_to_ir () call, and then
 * restored. On success the inlined blocks are linked/merged into the
 * current bblock and the cost is returned; on failure the new bblocks
 * are discarded and any pending loader error is cleared.
 */
6930 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6931 guchar *ip, guint real_offset, gboolean inline_always)
6933 MonoInst *ins, *rvar = NULL;
6934 MonoMethodHeader *cheader;
6935 MonoBasicBlock *ebblock, *sbblock;
6937 MonoMethod *prev_inlined_method;
6938 MonoInst **prev_locals, **prev_args;
6939 MonoType **prev_arg_types;
6940 guint prev_real_offset;
6941 GHashTable *prev_cbb_hash;
6942 MonoBasicBlock **prev_cil_offset_to_bb;
6943 MonoBasicBlock *prev_cbb;
6944 unsigned char* prev_cil_start;
6945 guint32 prev_cil_offset_to_bb_len;
6946 MonoMethod *prev_current_method;
6947 MonoGenericContext *prev_generic_context;
6948 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6950 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6952 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6953 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6956 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6957 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6962 fsig = mono_method_signature (cmethod);
6964 if (cfg->verbose_level > 2)
6965 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6967 if (!cmethod->inline_info) {
6968 cfg->stat_inlineable_methods++;
6969 cmethod->inline_info = 1;
6972 /* allocate local variables */
6973 cheader = mono_method_get_header (cmethod);
6975 if (cheader == NULL || mono_loader_get_last_error ()) {
6976 MonoLoaderError *error = mono_loader_get_last_error ();
6979 mono_metadata_free_mh (cheader);
6980 if (inline_always && error)
6981 mono_cfg_set_exception (cfg, error->exception_type);
6983 mono_loader_clear_error ();
6987 /*Must verify before creating locals as it can cause the JIT to assert.*/
6988 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6989 mono_metadata_free_mh (cheader);
6993 /* allocate space to store the return value */
6994 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6995 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6998 prev_locals = cfg->locals;
6999 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
7000 for (i = 0; i < cheader->num_locals; ++i)
7001 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
7003 /* allocate start and end blocks */
7004 /* This is needed so if the inline is aborted, we can clean up */
7005 NEW_BBLOCK (cfg, sbblock);
7006 sbblock->real_offset = real_offset;
7008 NEW_BBLOCK (cfg, ebblock);
7009 ebblock->block_num = cfg->num_bblocks++;
7010 ebblock->real_offset = real_offset;
/* Save the caller's per-method state before recursing */
7012 prev_args = cfg->args;
7013 prev_arg_types = cfg->arg_types;
7014 prev_inlined_method = cfg->inlined_method;
7015 cfg->inlined_method = cmethod;
7016 cfg->ret_var_set = FALSE;
7017 cfg->inline_depth ++;
7018 prev_real_offset = cfg->real_offset;
7019 prev_cbb_hash = cfg->cbb_hash;
7020 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
7021 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
7022 prev_cil_start = cfg->cil_start;
7023 prev_cbb = cfg->cbb;
7024 prev_current_method = cfg->current_method;
7025 prev_generic_context = cfg->generic_context;
7026 prev_ret_var_set = cfg->ret_var_set;
7027 prev_disable_inline = cfg->disable_inline;
7029 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
7032 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
7034 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state */
7036 cfg->inlined_method = prev_inlined_method;
7037 cfg->real_offset = prev_real_offset;
7038 cfg->cbb_hash = prev_cbb_hash;
7039 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
7040 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
7041 cfg->cil_start = prev_cil_start;
7042 cfg->locals = prev_locals;
7043 cfg->args = prev_args;
7044 cfg->arg_types = prev_arg_types;
7045 cfg->current_method = prev_current_method;
7046 cfg->generic_context = prev_generic_context;
7047 cfg->ret_var_set = prev_ret_var_set;
7048 cfg->disable_inline = prev_disable_inline;
7049 cfg->inline_depth --;
7051 if ((costs >= 0 && costs < 60) || inline_always || (costs >= 0 && (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))) {
7052 if (cfg->verbose_level > 2)
7053 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7055 cfg->stat_inlined_methods++;
7057 /* always add some code to avoid block split failures */
7058 MONO_INST_NEW (cfg, ins, OP_NOP);
7059 MONO_ADD_INS (prev_cbb, ins);
7061 prev_cbb->next_bb = sbblock;
7062 link_bblock (cfg, prev_cbb, sbblock);
7065 * Get rid of the begin and end bblocks if possible to aid local
7068 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
7070 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
7071 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
7073 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7074 MonoBasicBlock *prev = ebblock->in_bb [0];
7075 mono_merge_basic_blocks (cfg, prev, ebblock);
7077 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7078 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7079 cfg->cbb = prev_cbb;
7083 * It's possible that the rvar is set in some prev bblock, but not in others.
7089 for (i = 0; i < ebblock->in_count; ++i) {
7090 bb = ebblock->in_bb [i];
7092 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7095 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7105 * If the inlined method contains only a throw, then the ret var is not
7106 * set, so set it to a dummy value.
7109 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7111 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7114 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7117 if (cfg->verbose_level > 2)
7118 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7119 cfg->exception_type = MONO_EXCEPTION_NONE;
7120 mono_loader_clear_error ();
7122 /* This gets rid of the newly added bblocks */
7123 cfg->cbb = prev_cbb;
7125 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
7130 * Some of these comments may well be out-of-date.
7131 * Design decisions: we do a single pass over the IL code (and we do bblock
7132 * splitting/merging in the few cases when it's required: a back jump to an IL
7133 * address that was not already seen as bblock starting point).
7134 * Code is validated as we go (full verification is still better left to metadata/verify.c).
7135 * Complex operations are decomposed in simpler ones right away. We need to let the
7136 * arch-specific code peek and poke inside this process somehow (except when the
7137 * optimizations can take advantage of the full semantic info of coarse opcodes).
7138 * All the opcodes of the form opcode.s are 'normalized' to opcode.
7139 * MonoInst->opcode initially is the IL opcode or some simplification of that
7140 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
7141 * opcode with value bigger than OP_LAST.
7142 * At this point the IR can be handed over to an interpreter, a dumb code generator
7143 * or to the optimizing code generator that will translate it to SSA form.
7145 * Profiling directed optimizations.
7146 * We may compile by default with few or no optimizations and instrument the code
7147 * or the user may indicate what methods to optimize the most either in a config file
7148 * or through repeated runs where the compiler applies offline the optimizations to
7149 * each method and then decides if it was worth it.
/*
 * Validation macros used inside mono_method_to_ir (); each aborts the
 * current method via UNVERIFIED / TYPE_LOAD_ERROR when its check fails.
 * They rely on locals of that function (sp, stack_start, header, ip, end).
 */
7152 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7153 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7154 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7155 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7156 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7157 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7158 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7159 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7161 /* offset from br.s -> br like opcodes */
7162 #define BIG_BRANCH_OFFSET 13
/* Return TRUE if IP still belongs to basic block BB (i.e. no other bblock starts at IP). */
7165 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7167 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7169 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the CIL stream [START, END): decode each opcode's
 * operand kind and create basic blocks (via GET_BBLOCK) at every branch
 * target and at every instruction following a branch/switch. Also marks
 * blocks containing CEE_THROW as out-of-line so they can be laid out cold.
 */
7173 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7175 unsigned char *ip = start;
7176 unsigned char *target;
7179 MonoBasicBlock *bblock;
7180 const MonoOpcode *opcode;
7183 cli_addr = ip - start;
7184 i = mono_opcode_value ((const guint8 **)&ip, end);
7187 opcode = &mono_opcodes [i];
7188 switch (opcode->argument) {
7189 case MonoInlineNone:
7192 case MonoInlineString:
7193 case MonoInlineType:
7194 case MonoInlineField:
7195 case MonoInlineMethod:
7198 case MonoShortInlineR:
7205 case MonoShortInlineVar:
7206 case MonoShortInlineI:
7209 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
7210 target = start + cli_addr + 2 + (signed char)ip [1];
7211 GET_BBLOCK (cfg, bblock, target);
7214 GET_BBLOCK (cfg, bblock, ip);
7216 case MonoInlineBrTarget:
/* 4-byte signed branch displacement, relative to the next instruction */
7217 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7218 GET_BBLOCK (cfg, bblock, target);
7221 GET_BBLOCK (cfg, bblock, ip);
7223 case MonoInlineSwitch: {
7224 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the n-entry jump table */
7227 cli_addr += 5 + 4 * n;
7228 target = start + cli_addr;
7229 GET_BBLOCK (cfg, bblock, target);
7231 for (j = 0; j < n; ++j) {
7232 target = start + cli_addr + (gint32)read32 (ip);
7233 GET_BBLOCK (cfg, bblock, target);
7243 g_assert_not_reached ();
7246 if (i == CEE_THROW) {
7247 unsigned char *bb_start = ip - 1;
7249 /* Find the start of the bblock containing the throw */
7251 while ((bb_start >= start) && !bblock) {
7252 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7256 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrappers the
 * method is fetched from the wrapper data (and inflated when a generic
 * context is given); otherwise it is looked up through the metadata.
 * Open constructed types are allowed in the result.
 */
7266 static inline MonoMethod *
7267 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7271 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7272 method = mono_method_get_wrapper_data (m, token);
7275 method = mono_class_inflate_generic_method_checked (method, context, &error);
7276 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7279 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared
 * code, methods on open constructed types are rejected.
 */
7285 static inline MonoMethod *
7286 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7288 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7290 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in METHOD's context (wrapper data for
 * wrappers, metadata otherwise), inflating with CONTEXT, and initialize it.
 */
7296 static inline MonoClass*
7297 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7302 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7303 klass = mono_method_get_wrapper_data (method, token);
7305 klass = mono_class_inflate_generic_class (klass, context);
7307 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7308 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7311 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature (wrapper data for wrappers,
 * parsed from metadata otherwise) and inflate it with CONTEXT.
 */
7315 static inline MonoMethodSignature*
7316 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7318 MonoMethodSignature *fsig;
7320 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7321 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7323 fsig = mono_metadata_parse_signature (method->klass->image, token);
7327 fsig = mono_inflate_generic_signature(fsig, context, &error);
7329 g_assert(mono_error_ok(&error));
/*
 * throw_exception:
 *
 *   Return (lazily caching in a static) the managed
 * SecurityManager.ThrowException(Exception) method, used to raise
 * CoreCLR security exceptions from JITted code.
 */
7335 throw_exception (void)
7337 static MonoMethod *method = NULL;
7340 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7341 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/* Emit a call to SecurityManager.ThrowException (EX) at the current point. */
7348 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7350 MonoMethod *thrower = throw_exception ();
7353 EMIT_NEW_PCONST (cfg, args [0], ex);
7354 mono_emit_method_call (cfg, thrower, args, NULL);
7358 * Return the original method if a wrapper is specified. We can only access
7359 * the custom attributes from the original method.
7362 get_original_method (MonoMethod *method)
/* Non-wrappers are already the original method */
7364 if (method->wrapper_type == MONO_WRAPPER_NONE)
7367 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7368 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7371 /* in other cases we need to find the original method */
7372 return mono_marshal_method_from_wrapper (method);
/* CoreCLR security: emit a throw if CALLER may not access FIELD. */
7376 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7378 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7379 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7381 emit_throw_exception (cfg, ex);
/* CoreCLR security: emit a throw if CALLER may not call CALLEE. */
7385 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7387 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7388 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7390 emit_throw_exception (cfg, ex);
7394 * Check that the IL instructions at ip are the array initialization
7395 * sequence and return the pointer to the data and the size.
/*
 * Recognizes the dup/ldtoken/call RuntimeHelpers::InitializeArray pattern
 * and, when it matches, returns the raw field data (or its RVA for AOT),
 * setting *out_size and *out_field_token. Element types needing a byte
 * swap on big-endian targets are only accepted on little-endian builds.
 */
7398 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7401 * newarr[System.Int32]
7403 * ldtoken field valuetype ...
7404 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
7406 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7408 guint32 token = read32 (ip + 7);
7409 guint32 field_token = read32 (ip + 2);
7410 guint32 field_index = field_token & 0xffffff;
7412 const char *data_ptr;
7414 MonoMethod *cmethod;
7415 MonoClass *dummy_class;
7416 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7420 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7424 *out_field_token = field_token;
7426 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
7429 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7431 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7432 case MONO_TYPE_BOOLEAN:
7436 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7437 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7438 case MONO_TYPE_CHAR:
/* The array initializer must never be larger than the field's blob */
7455 if (size > mono_type_size (field->type, &dummy_align))
7458 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7459 if (!image_is_dynamic (method->klass->image)) {
7460 field_index = read32 (ip + 2) & 0xffffff;
7461 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7462 data_ptr = mono_image_rva_map (method->klass->image, rva);
7463 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7464 /* for aot code we do the lookup on load */
7465 if (aot && data_ptr)
7466 return GUINT_TO_POINTER (rva);
7468 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7470 data_ptr = mono_field_get_data (field);
/*
 * Record an InvalidProgramException on CFG, formatting a message with the
 * method's full name and a disassembly of the offending instruction at IP.
 * The header is queued on cfg->headers_to_free rather than freed here.
 */
7478 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7480 char *method_fname = mono_method_full_name (method, TRUE);
7482 MonoMethodHeader *header = mono_method_get_header (method);
7484 if (header->code_size == 0)
7485 method_code = g_strdup ("method body is empty.");
7487 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7488 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7489 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7490 g_free (method_fname);
7491 g_free (method_code);
7492 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * Abort compilation with a pre-built managed exception object; the object
 * is GC-registered as a root since it lives across the compile.
 */
7496 set_exception_object (MonoCompile *cfg, MonoException *exception)
7498 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7499 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr, MONO_ROOT_SOURCE_JIT, "jit exception");
7500 cfg->exception_ptr = exception;
/*
 * Emit a store of *SP into local N. When the value is a freshly emitted
 * constant, simply retarget its destination register instead of emitting
 * a separate move.
 */
7504 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7507 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7508 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7509 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7510 /* Optimize reg-reg moves away */
7512 * Can't optimize other opcodes, since sp[0] might point to
7513 * the last ins of a decomposed opcode.
7515 sp [0]->dreg = (cfg)->locals [n]->dreg;
7517 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7522 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * When the ldloca at IP is immediately consumed by an initobj in the same
 * bblock, replace the pair with a direct local initialization and return
 * the new IP past the consumed instructions (NULL-ish fallthrough otherwise
 * — exact failure path elided in this view).
 */
7525 static inline unsigned char *
7526 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7536 local = read16 (ip + 2);
7540 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7541 /* From the INITOBJ case */
7542 token = read32 (ip + 2);
7543 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7544 CHECK_TYPELOAD (klass);
7545 type = mini_get_underlying_type (&klass->byval_arg);
7546 emit_init_local (cfg, local, type, TRUE);
/* Walk up the class hierarchy; TRUE if KLASS derives from System.Exception. */
7554 is_exception_class (MonoClass *klass)
7557 if (klass == mono_defaults.exception_class)
7559 klass = klass->parent;
7565 * is_jit_optimizer_disabled:
7567 * Determine whether M's assembly has a DebuggableAttribute with the
7568 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached per assembly (jit_optimizer_disabled +
 * jit_optimizer_disabled_inited, published with a memory barrier so
 * concurrent readers see a consistent pair).
 */
7571 is_jit_optimizer_disabled (MonoMethod *m)
7573 MonoAssembly *ass = m->klass->image->assembly;
7574 MonoCustomAttrInfo* attrs;
7575 static MonoClass *klass;
7577 gboolean val = FALSE;
7580 if (ass->jit_optimizer_disabled_inited)
7581 return ass->jit_optimizer_disabled;
7584 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute type available: treat as not disabled */
7587 ass->jit_optimizer_disabled = FALSE;
7588 mono_memory_barrier ();
7589 ass->jit_optimizer_disabled_inited = TRUE;
7593 attrs = mono_custom_attrs_from_assembly (ass);
7595 for (i = 0; i < attrs->num_attrs; ++i) {
7596 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7598 MonoMethodSignature *sig;
7600 if (!attr->ctor || attr->ctor->klass != klass)
7602 /* Decode the attribute. See reflection.c */
7603 p = (const char*)attr->data;
7604 g_assert (read16 (p) == 0x0001);
7607 // FIXME: Support named parameters
7608 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor form is decoded */
7609 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7611 /* Two boolean arguments */
7615 mono_custom_attrs_free (attrs);
7618 ass->jit_optimizer_disabled = val;
7619 mono_memory_barrier ();
7620 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * Decide whether the call to CMETHOD can be turned into a tail call:
 * starts from the architecture's verdict on the two signatures, then
 * vetoes any case where the callee could observe the caller's stack
 * (byref/pointer args, valuetype 'this'), pinvokes, LMF-saving callers,
 * most wrappers, and anything other than a plain CEE_CALL.
 */
7626 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7628 gboolean supported_tail_call;
7631 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7633 for (i = 0; i < fsig->param_count; ++i) {
7634 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7635 /* These can point to the current method's stack */
7636 supported_tail_call = FALSE;
7638 if (fsig->hasthis && cmethod->klass->valuetype)
7639 /* this might point to the current method's stack */
7640 supported_tail_call = FALSE;
7641 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7642 supported_tail_call = FALSE;
7643 if (cfg->method->save_lmf)
7644 supported_tail_call = FALSE;
7645 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7646 supported_tail_call = FALSE;
7647 if (call_opcode != CEE_CALL)
7648 supported_tail_call = FALSE;
7650 /* Debugging support */
7652 if (supported_tail_call) {
7653 if (!mono_debug_count ())
7654 supported_tail_call = FALSE;
7658 return supported_tail_call;
7664 * Handle calls made to ctors from NEWOBJ opcodes.
/*
 * Chooses among: an intrinsic ctor, inlining the ctor body, a gsharedvt
 * indirect call, an rgctx-resolved indirect call for unpatchable generic
 * calls, or a plain direct call. For shared generic valuetype ctors a
 * vtable/mrgctx argument is materialized first.
 */
7667 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7668 MonoInst **sp, guint8 *ip, int *inline_costs)
7670 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7672 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7673 mono_method_is_generic_sharable (cmethod, TRUE)) {
7674 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7675 mono_class_vtable (cfg->domain, cmethod->klass);
7676 CHECK_TYPELOAD (cmethod->klass);
7678 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7679 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7682 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7683 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7685 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7687 CHECK_TYPELOAD (cmethod->klass);
7688 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7693 /* Avoid virtual calls to ctors if possible */
7694 if (mono_class_is_marshalbyref (cmethod->klass))
7695 callvirt_this_arg = sp [0];
7697 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7698 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7699 CHECK_CFG_EXCEPTION;
7700 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7701 mono_method_check_inlining (cfg, cmethod) &&
7702 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7705 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7706 cfg->real_offset += 5;
/* subtract the cost of the call instruction the inline replaced */
7708 *inline_costs += costs - 5;
7710 INLINE_FAILURE ("inline failure");
7711 // FIXME-VT: Clean this up
7712 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7713 GSHAREDVT_FAILURE(*ip);
7714 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7716 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7719 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7720 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7721 } else if (context_used &&
7722 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7723 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7724 MonoInst *cmethod_addr;
7726 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7728 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7729 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7731 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
7733 INLINE_FAILURE ("ctor call");
7734 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7735 callvirt_this_arg, NULL, vtable_arg);
/*
 * emit_setret:
 *   Emit the IR that delivers VAL as the return value of cfg->method.
 *   Valuetype returns (where mini_type_to_stind reports CEE_STOBJ) are
 *   stored either into the cfg->ret variable or through the hidden
 *   vret_addr pointer; scalar returns go through mono_arch_emit_setret.
 *   NOTE(review): this excerpt elides several original lines (the return
 *   type, else branches and closing braces), so the comments below only
 *   describe the statements that are visible here.
 */
7742 emit_setret (MonoCompile *cfg, MonoInst *val)
/* Strip custom modifiers etc. so we classify the real underlying return type. */
7744 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (cfg->method)->ret);
/* CEE_STOBJ means the return value is a value type that does not fit a register. */
7747 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
/* No hidden return-address argument: store into the compiler-allocated ret var. */
7750 if (!cfg->vret_addr) {
7751 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, val);
/* Hidden vret_addr present: write the struct through the caller-provided pointer. */
7753 EMIT_NEW_RETLOADA (cfg, ret_addr);
7755 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, val->dreg);
7756 ins->klass = mono_class_from_mono_type (ret_type);
7759 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets cannot return an R4 directly; convert via a JIT icall first. */
7760 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7761 MonoInst *iargs [1];
7765 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7766 mono_arch_emit_setret (cfg, cfg->method, conv);
7768 mono_arch_emit_setret (cfg, cfg->method, val);
/* Default scalar path: let the backend move VAL into the return register(s). */
7771 mono_arch_emit_setret (cfg, cfg->method, val);
/*
 * sig_to_rgctx_sig:
 *   Build a heap-allocated copy of SIG extended with one extra trailing
 *   parameter of pointer-sized integer type (&mono_defaults.int_class->
 *   byval_arg) — presumably the slot used to pass the rgctx argument, as
 *   the name suggests; confirm against the callers.
 *   NOTE(review): the loop-index declaration and the return statement are
 *   elided from this excerpt; only the visible statements are documented.
 */
7776 static MonoMethodSignature*
7777 sig_to_rgctx_sig (MonoMethodSignature *sig)
7779 // FIXME: memory allocation
7780 MonoMethodSignature *res;
/* Room for the fixed header plus param_count + 1 MonoType* slots. */
7783 res = g_malloc (MONO_SIZEOF_METHOD_SIGNATURE + (sig->param_count + 1) * sizeof (MonoType*));
/* Only the fixed-size header is copied; the params array is filled below. */
7784 memcpy (res, sig, MONO_SIZEOF_METHOD_SIGNATURE);
7785 res->param_count = sig->param_count + 1;
7786 for (i = 0; i < sig->param_count; ++i)
7787 res->params [i] = sig->params [i];
/* The appended last parameter is a native-int (pointer-sized) type. */
7788 res->params [sig->param_count] = &mono_defaults.int_class->byval_arg;
7793 * mono_method_to_ir:
7795 * Translate the .NET IL into linear IR.
7798 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7799 MonoInst *return_var, MonoInst **inline_args,
7800 guint inline_offset, gboolean is_virtual_call)
7803 MonoInst *ins, **sp, **stack_start;
7804 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7805 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7806 MonoMethod *cmethod, *method_definition;
7807 MonoInst **arg_array;
7808 MonoMethodHeader *header;
7810 guint32 token, ins_flag;
7812 MonoClass *constrained_class = NULL;
7813 unsigned char *ip, *end, *target, *err_pos;
7814 MonoMethodSignature *sig;
7815 MonoGenericContext *generic_context = NULL;
7816 MonoGenericContainer *generic_container = NULL;
7817 MonoType **param_types;
7818 int i, n, start_new_bblock, dreg;
7819 int num_calls = 0, inline_costs = 0;
7820 int breakpoint_id = 0;
7822 GSList *class_inits = NULL;
7823 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7825 gboolean init_locals, seq_points, skip_dead_blocks;
7826 gboolean sym_seq_points = FALSE;
7827 MonoDebugMethodInfo *minfo;
7828 MonoBitSet *seq_point_locs = NULL;
7829 MonoBitSet *seq_point_set_locs = NULL;
7831 cfg->disable_inline = is_jit_optimizer_disabled (method);
7833 /* serialization and xdomain stuff may need access to private fields and methods */
7834 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7835 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7836 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7837 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7838 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7839 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7841 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7842 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7843 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7844 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7845 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7847 image = method->klass->image;
7848 header = mono_method_get_header (method);
7850 MonoLoaderError *error;
7852 if ((error = mono_loader_get_last_error ())) {
7853 mono_cfg_set_exception (cfg, error->exception_type);
7855 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7856 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7858 goto exception_exit;
7860 generic_container = mono_method_get_generic_container (method);
7861 sig = mono_method_signature (method);
7862 num_args = sig->hasthis + sig->param_count;
7863 ip = (unsigned char*)header->code;
7864 cfg->cil_start = ip;
7865 end = ip + header->code_size;
7866 cfg->stat_cil_code_size += header->code_size;
7868 seq_points = cfg->gen_seq_points && cfg->method == method;
7870 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7871 /* We could hit a seq point before attaching to the JIT (#8338) */
7875 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7876 minfo = mono_debug_lookup_method (method);
7878 MonoSymSeqPoint *sps;
7879 int i, n_il_offsets;
7881 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7882 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7883 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7884 sym_seq_points = TRUE;
7885 for (i = 0; i < n_il_offsets; ++i) {
7886 if (sps [i].il_offset < header->code_size)
7887 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7890 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7891 /* Methods without line number info like auto-generated property accessors */
7892 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7893 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7894 sym_seq_points = TRUE;
7899 * Methods without init_locals set could cause asserts in various passes
7900 * (#497220). To work around this, we emit dummy initialization opcodes
7901 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7902 * on some platforms.
7904 if ((cfg->opt & MONO_OPT_UNSAFE) && cfg->backend->have_dummy_init)
7905 init_locals = header->init_locals;
7909 method_definition = method;
7910 while (method_definition->is_inflated) {
7911 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7912 method_definition = imethod->declaring;
7915 /* SkipVerification is not allowed if core-clr is enabled */
7916 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7918 dont_verify_stloc = TRUE;
7921 if (sig->is_inflated)
7922 generic_context = mono_method_get_context (method);
7923 else if (generic_container)
7924 generic_context = &generic_container->context;
7925 cfg->generic_context = generic_context;
7928 g_assert (!sig->has_type_parameters);
7930 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7931 g_assert (method->is_inflated);
7932 g_assert (mono_method_get_context (method)->method_inst);
7934 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7935 g_assert (sig->generic_param_count);
7937 if (cfg->method == method) {
7938 cfg->real_offset = 0;
7940 cfg->real_offset = inline_offset;
7943 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7944 cfg->cil_offset_to_bb_len = header->code_size;
7946 cfg->current_method = method;
7948 if (cfg->verbose_level > 2)
7949 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7951 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7953 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7954 for (n = 0; n < sig->param_count; ++n)
7955 param_types [n + sig->hasthis] = sig->params [n];
7956 cfg->arg_types = param_types;
7958 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7959 if (cfg->method == method) {
7961 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7962 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7965 NEW_BBLOCK (cfg, start_bblock);
7966 cfg->bb_entry = start_bblock;
7967 start_bblock->cil_code = NULL;
7968 start_bblock->cil_length = 0;
7971 NEW_BBLOCK (cfg, end_bblock);
7972 cfg->bb_exit = end_bblock;
7973 end_bblock->cil_code = NULL;
7974 end_bblock->cil_length = 0;
7975 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7976 g_assert (cfg->num_bblocks == 2);
7978 arg_array = cfg->args;
7980 if (header->num_clauses) {
7981 cfg->spvars = g_hash_table_new (NULL, NULL);
7982 cfg->exvars = g_hash_table_new (NULL, NULL);
7984 /* handle exception clauses */
7985 for (i = 0; i < header->num_clauses; ++i) {
7986 MonoBasicBlock *try_bb;
7987 MonoExceptionClause *clause = &header->clauses [i];
7988 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7990 try_bb->real_offset = clause->try_offset;
7991 try_bb->try_start = TRUE;
7992 try_bb->region = ((i + 1) << 8) | clause->flags;
7993 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7994 tblock->real_offset = clause->handler_offset;
7995 tblock->flags |= BB_EXCEPTION_HANDLER;
7998 * Linking the try block with the EH block hinders inlining as we won't be able to
7999 * merge the bblocks from inlining and produce an artificial hole for no good reason.
8001 if (COMPILE_LLVM (cfg))
8002 link_bblock (cfg, try_bb, tblock);
8004 if (*(ip + clause->handler_offset) == CEE_POP)
8005 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
8007 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
8008 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
8009 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
8010 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8011 MONO_ADD_INS (tblock, ins);
8013 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FILTER) {
8014 /* finally clauses already have a seq point */
8015 /* seq points for filter clauses are emitted below */
8016 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8017 MONO_ADD_INS (tblock, ins);
8020 /* todo: is a fault block unsafe to optimize? */
8021 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
8022 tblock->flags |= BB_EXCEPTION_UNSAFE;
8025 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
8027 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
8029 /* catch and filter blocks get the exception object on the stack */
8030 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
8031 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8033 /* mostly like handle_stack_args (), but just sets the input args */
8034 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
8035 tblock->in_scount = 1;
8036 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8037 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8041 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
8042 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
8043 if (!cfg->compile_llvm) {
8044 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
8045 ins->dreg = tblock->in_stack [0]->dreg;
8046 MONO_ADD_INS (tblock, ins);
8049 MonoInst *dummy_use;
8052 * Add a dummy use for the exvar so its liveness info will be
8055 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
8058 if (seq_points && clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8059 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
8060 MONO_ADD_INS (tblock, ins);
8063 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
8064 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
8065 tblock->flags |= BB_EXCEPTION_HANDLER;
8066 tblock->real_offset = clause->data.filter_offset;
8067 tblock->in_scount = 1;
8068 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
8069 /* The filter block shares the exvar with the handler block */
8070 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
8071 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
8072 MONO_ADD_INS (tblock, ins);
8076 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8077 clause->data.catch_class &&
8079 mono_class_check_context_used (clause->data.catch_class)) {
8081 * In shared generic code with catch
8082 * clauses containing type variables
8083 * the exception handling code has to
8084 * be able to get to the rgctx.
8085 * Therefore we have to make sure that
8086 * the vtable/mrgctx argument (for
8087 * static or generic methods) or the
8088 * "this" argument (for non-static
8089 * methods) are live.
8091 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8092 mini_method_get_context (method)->method_inst ||
8093 method->klass->valuetype) {
8094 mono_get_vtable_var (cfg);
8096 MonoInst *dummy_use;
8098 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8103 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8104 cfg->cbb = start_bblock;
8105 cfg->args = arg_array;
8106 mono_save_args (cfg, sig, inline_args);
8109 /* FIRST CODE BLOCK */
8110 NEW_BBLOCK (cfg, tblock);
8111 tblock->cil_code = ip;
8115 ADD_BBLOCK (cfg, tblock);
8117 if (cfg->method == method) {
8118 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8119 if (breakpoint_id) {
8120 MONO_INST_NEW (cfg, ins, OP_BREAK);
8121 MONO_ADD_INS (cfg->cbb, ins);
8125 /* we use a separate basic block for the initialization code */
8126 NEW_BBLOCK (cfg, init_localsbb);
8127 cfg->bb_init = init_localsbb;
8128 init_localsbb->real_offset = cfg->real_offset;
8129 start_bblock->next_bb = init_localsbb;
8130 init_localsbb->next_bb = cfg->cbb;
8131 link_bblock (cfg, start_bblock, init_localsbb);
8132 link_bblock (cfg, init_localsbb, cfg->cbb);
8134 cfg->cbb = init_localsbb;
8136 if (cfg->gsharedvt && cfg->method == method) {
8137 MonoGSharedVtMethodInfo *info;
8138 MonoInst *var, *locals_var;
8141 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8142 info->method = cfg->method;
8143 info->count_entries = 16;
8144 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8145 cfg->gsharedvt_info = info;
8147 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8148 /* prevent it from being register allocated */
8149 //var->flags |= MONO_INST_VOLATILE;
8150 cfg->gsharedvt_info_var = var;
8152 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8153 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8155 /* Allocate locals */
8156 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8157 /* prevent it from being register allocated */
8158 //locals_var->flags |= MONO_INST_VOLATILE;
8159 cfg->gsharedvt_locals_var = locals_var;
8161 dreg = alloc_ireg (cfg);
8162 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8164 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8165 ins->dreg = locals_var->dreg;
8167 MONO_ADD_INS (cfg->cbb, ins);
8168 cfg->gsharedvt_locals_var_ins = ins;
8170 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8173 ins->flags |= MONO_INST_INIT;
8177 if (mono_security_core_clr_enabled ()) {
8178 /* check if this is native code, e.g. an icall or a p/invoke */
8179 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8180 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8182 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8183 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8185 /* if this ia a native call then it can only be JITted from platform code */
8186 if ((icall || pinvk) && method->klass && method->klass->image) {
8187 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8188 MonoException *ex = icall ? mono_get_exception_security () :
8189 mono_get_exception_method_access ();
8190 emit_throw_exception (cfg, ex);
8197 CHECK_CFG_EXCEPTION;
8199 if (header->code_size == 0)
8202 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8207 if (cfg->method == method)
8208 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8210 for (n = 0; n < header->num_locals; ++n) {
8211 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8216 /* We force the vtable variable here for all shared methods
8217 for the possibility that they might show up in a stack
8218 trace where their exact instantiation is needed. */
8219 if (cfg->gshared && method == cfg->method) {
8220 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8221 mini_method_get_context (method)->method_inst ||
8222 method->klass->valuetype) {
8223 mono_get_vtable_var (cfg);
8225 /* FIXME: Is there a better way to do this?
8226 We need the variable live for the duration
8227 of the whole method. */
8228 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8232 /* add a check for this != NULL to inlined methods */
8233 if (is_virtual_call) {
8236 NEW_ARGLOAD (cfg, arg_ins, 0);
8237 MONO_ADD_INS (cfg->cbb, arg_ins);
8238 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8241 skip_dead_blocks = !dont_verify;
8242 if (skip_dead_blocks) {
8243 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8248 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8249 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8252 start_new_bblock = 0;
8254 if (cfg->method == method)
8255 cfg->real_offset = ip - header->code;
8257 cfg->real_offset = inline_offset;
8262 if (start_new_bblock) {
8263 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8264 if (start_new_bblock == 2) {
8265 g_assert (ip == tblock->cil_code);
8267 GET_BBLOCK (cfg, tblock, ip);
8269 cfg->cbb->next_bb = tblock;
8271 start_new_bblock = 0;
8272 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8273 if (cfg->verbose_level > 3)
8274 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8275 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8279 g_slist_free (class_inits);
8282 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8283 link_bblock (cfg, cfg->cbb, tblock);
8284 if (sp != stack_start) {
8285 handle_stack_args (cfg, stack_start, sp - stack_start);
8287 CHECK_UNVERIFIABLE (cfg);
8289 cfg->cbb->next_bb = tblock;
8291 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8292 if (cfg->verbose_level > 3)
8293 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8294 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8297 g_slist_free (class_inits);
8302 if (skip_dead_blocks) {
8303 int ip_offset = ip - header->code;
8305 if (ip_offset == bb->end)
8309 int op_size = mono_opcode_size (ip, end);
8310 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8312 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8314 if (ip_offset + op_size == bb->end) {
8315 MONO_INST_NEW (cfg, ins, OP_NOP);
8316 MONO_ADD_INS (cfg->cbb, ins);
8317 start_new_bblock = 1;
8325 * Sequence points are points where the debugger can place a breakpoint.
8326 * Currently, we generate these automatically at points where the IL
8329 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8331 * Make methods interruptable at the beginning, and at the targets of
8332 * backward branches.
8333 * Also, do this at the start of every bblock in methods with clauses too,
8334 * to be able to handle instructions with inprecise control flow like
8336 * Backward branches are handled at the end of method-to-ir ().
8338 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8339 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8341 /* Avoid sequence points on empty IL like .volatile */
8342 // FIXME: Enable this
8343 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8344 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8345 if ((sp != stack_start) && !sym_seq_point)
8346 ins->flags |= MONO_INST_NONEMPTY_STACK;
8347 MONO_ADD_INS (cfg->cbb, ins);
8350 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8353 cfg->cbb->real_offset = cfg->real_offset;
8355 if ((cfg->method == method) && cfg->coverage_info) {
8356 guint32 cil_offset = ip - header->code;
8357 cfg->coverage_info->data [cil_offset].cil_code = ip;
8359 /* TODO: Use an increment here */
8360 #if defined(TARGET_X86)
8361 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8362 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8364 MONO_ADD_INS (cfg->cbb, ins);
8366 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8367 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8371 if (cfg->verbose_level > 3)
8372 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8376 if (seq_points && !sym_seq_points && sp != stack_start) {
8378 * The C# compiler uses these nops to notify the JIT that it should
8379 * insert seq points.
8381 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8382 MONO_ADD_INS (cfg->cbb, ins);
8384 if (cfg->keep_cil_nops)
8385 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8387 MONO_INST_NEW (cfg, ins, OP_NOP);
8389 MONO_ADD_INS (cfg->cbb, ins);
8392 if (should_insert_brekpoint (cfg->method)) {
8393 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8395 MONO_INST_NEW (cfg, ins, OP_NOP);
8398 MONO_ADD_INS (cfg->cbb, ins);
8404 CHECK_STACK_OVF (1);
8405 n = (*ip)-CEE_LDARG_0;
8407 EMIT_NEW_ARGLOAD (cfg, ins, n);
8415 CHECK_STACK_OVF (1);
8416 n = (*ip)-CEE_LDLOC_0;
8418 EMIT_NEW_LOCLOAD (cfg, ins, n);
8427 n = (*ip)-CEE_STLOC_0;
8430 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8432 emit_stloc_ir (cfg, sp, header, n);
8439 CHECK_STACK_OVF (1);
8442 EMIT_NEW_ARGLOAD (cfg, ins, n);
8448 CHECK_STACK_OVF (1);
8451 NEW_ARGLOADA (cfg, ins, n);
8452 MONO_ADD_INS (cfg->cbb, ins);
8462 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8464 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8469 CHECK_STACK_OVF (1);
8472 EMIT_NEW_LOCLOAD (cfg, ins, n);
8476 case CEE_LDLOCA_S: {
8477 unsigned char *tmp_ip;
8479 CHECK_STACK_OVF (1);
8480 CHECK_LOCAL (ip [1]);
8482 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8488 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8497 CHECK_LOCAL (ip [1]);
8498 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8500 emit_stloc_ir (cfg, sp, header, ip [1]);
8505 CHECK_STACK_OVF (1);
8506 EMIT_NEW_PCONST (cfg, ins, NULL);
8507 ins->type = STACK_OBJ;
8512 CHECK_STACK_OVF (1);
8513 EMIT_NEW_ICONST (cfg, ins, -1);
8526 CHECK_STACK_OVF (1);
8527 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8533 CHECK_STACK_OVF (1);
8535 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8541 CHECK_STACK_OVF (1);
8542 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8548 CHECK_STACK_OVF (1);
8549 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8550 ins->type = STACK_I8;
8551 ins->dreg = alloc_dreg (cfg, STACK_I8);
8553 ins->inst_l = (gint64)read64 (ip);
8554 MONO_ADD_INS (cfg->cbb, ins);
8560 gboolean use_aotconst = FALSE;
8562 #ifdef TARGET_POWERPC
8563 /* FIXME: Clean this up */
8564 if (cfg->compile_aot)
8565 use_aotconst = TRUE;
8568 /* FIXME: we should really allocate this only late in the compilation process */
8569 f = mono_domain_alloc (cfg->domain, sizeof (float));
8571 CHECK_STACK_OVF (1);
8577 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8579 dreg = alloc_freg (cfg);
8580 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8581 ins->type = cfg->r4_stack_type;
8583 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8584 ins->type = cfg->r4_stack_type;
8585 ins->dreg = alloc_dreg (cfg, STACK_R8);
8587 MONO_ADD_INS (cfg->cbb, ins);
8597 gboolean use_aotconst = FALSE;
8599 #ifdef TARGET_POWERPC
8600 /* FIXME: Clean this up */
8601 if (cfg->compile_aot)
8602 use_aotconst = TRUE;
8605 /* FIXME: we should really allocate this only late in the compilation process */
8606 d = mono_domain_alloc (cfg->domain, sizeof (double));
8608 CHECK_STACK_OVF (1);
8614 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8616 dreg = alloc_freg (cfg);
8617 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8618 ins->type = STACK_R8;
8620 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8621 ins->type = STACK_R8;
8622 ins->dreg = alloc_dreg (cfg, STACK_R8);
8624 MONO_ADD_INS (cfg->cbb, ins);
8633 MonoInst *temp, *store;
8635 CHECK_STACK_OVF (1);
8639 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8640 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8642 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8645 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8658 if (sp [0]->type == STACK_R8)
8659 /* we need to pop the value from the x86 FP stack */
8660 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8665 MonoMethodSignature *fsig;
8668 INLINE_FAILURE ("jmp");
8669 GSHAREDVT_FAILURE (*ip);
8672 if (stack_start != sp)
8674 token = read32 (ip + 1);
8675 /* FIXME: check the signature matches */
8676 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8678 if (!cmethod || mono_loader_get_last_error ())
8681 if (cfg->gshared && mono_method_check_context_used (cmethod))
8682 GENERIC_SHARING_FAILURE (CEE_JMP);
8684 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8686 fsig = mono_method_signature (cmethod);
8687 n = fsig->param_count + fsig->hasthis;
8688 if (cfg->llvm_only) {
8691 args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8692 for (i = 0; i < n; ++i)
8693 EMIT_NEW_ARGLOAD (cfg, args [i], i);
8694 ins = mono_emit_method_call_full (cfg, cmethod, fsig, TRUE, args, NULL, NULL, NULL);
8696 * The code in mono-basic-block.c treats the rest of the code as dead, but we
8697 * have to emit a normal return since llvm expects it.
8700 emit_setret (cfg, ins);
8701 MONO_INST_NEW (cfg, ins, OP_BR);
8702 ins->inst_target_bb = end_bblock;
8703 MONO_ADD_INS (cfg->cbb, ins);
8704 link_bblock (cfg, cfg->cbb, end_bblock);
8707 } else if (cfg->backend->have_op_tail_call) {
8708 /* Handle tail calls similarly to calls */
8711 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8712 call->method = cmethod;
8713 call->tail_call = TRUE;
8714 call->signature = mono_method_signature (cmethod);
8715 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8716 call->inst.inst_p0 = cmethod;
8717 for (i = 0; i < n; ++i)
8718 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8720 mono_arch_emit_call (cfg, call);
8721 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8722 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8724 for (i = 0; i < num_args; ++i)
8725 /* Prevent arguments from being optimized away */
8726 arg_array [i]->flags |= MONO_INST_VOLATILE;
8728 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8729 ins = (MonoInst*)call;
8730 ins->inst_p0 = cmethod;
8731 MONO_ADD_INS (cfg->cbb, ins);
8735 start_new_bblock = 1;
8740 MonoMethodSignature *fsig;
8743 token = read32 (ip + 1);
8747 //GSHAREDVT_FAILURE (*ip);
8752 fsig = mini_get_signature (method, token, generic_context);
8754 if (method->dynamic && fsig->pinvoke) {
8758 * This is a call through a function pointer using a pinvoke
8759 * signature. Have to create a wrapper and call that instead.
8760 * FIXME: This is very slow, need to create a wrapper at JIT time
8761 * instead based on the signature.
8763 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8764 EMIT_NEW_PCONST (cfg, args [1], fsig);
8766 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8769 n = fsig->param_count + fsig->hasthis;
8773 //g_assert (!virtual || fsig->hasthis);
8777 inline_costs += 10 * num_calls++;
8780 * Making generic calls out of gsharedvt methods.
8781 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8782 * patching gshared method addresses into a gsharedvt method.
8784 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8786 * We pass the address to the gsharedvt trampoline in the rgctx reg
8788 MonoInst *callee = addr;
8790 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8792 GSHAREDVT_FAILURE (*ip);
8794 addr = emit_get_rgctx_sig (cfg, context_used,
8795 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8796 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8800 /* Prevent inlining of methods with indirect calls */
8801 INLINE_FAILURE ("indirect call");
8803 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8808 * Instead of emitting an indirect call, emit a direct call
8809 * with the contents of the aotconst as the patch info.
8811 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8812 info_type = addr->inst_c1;
8813 info_data = addr->inst_p0;
8815 info_type = addr->inst_right->inst_c1;
8816 info_data = addr->inst_right->inst_left;
8819 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8820 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8825 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8829 /* End of call, INS should contain the result of the call, if any */
8831 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8833 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8836 CHECK_CFG_EXCEPTION;
8840 constrained_class = NULL;
8844 case CEE_CALLVIRT: {
8845 MonoInst *addr = NULL;
8846 MonoMethodSignature *fsig = NULL;
8848 int virtual = *ip == CEE_CALLVIRT;
8849 gboolean pass_imt_from_rgctx = FALSE;
8850 MonoInst *imt_arg = NULL;
8851 MonoInst *keep_this_alive = NULL;
8852 gboolean pass_vtable = FALSE;
8853 gboolean pass_mrgctx = FALSE;
8854 MonoInst *vtable_arg = NULL;
8855 gboolean check_this = FALSE;
8856 gboolean supported_tail_call = FALSE;
8857 gboolean tail_call = FALSE;
8858 gboolean need_seq_point = FALSE;
8859 guint32 call_opcode = *ip;
8860 gboolean emit_widen = TRUE;
8861 gboolean push_res = TRUE;
8862 gboolean skip_ret = FALSE;
8863 gboolean delegate_invoke = FALSE;
8864 gboolean direct_icall = FALSE;
8865 gboolean constrained_partial_call = FALSE;
8866 MonoMethod *cil_method;
8869 token = read32 (ip + 1);
8873 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8874 cil_method = cmethod;
8876 if (constrained_class) {
8877 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8878 if (!mini_is_gsharedvt_klass (constrained_class)) {
8879 g_assert (!cmethod->klass->valuetype);
8880 if (!mini_type_is_reference (&constrained_class->byval_arg))
8881 constrained_partial_call = TRUE;
8885 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8886 if (cfg->verbose_level > 2)
8887 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8888 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8889 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8891 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8895 if (cfg->verbose_level > 2)
8896 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8898 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8900 * This is needed since get_method_constrained can't find
8901 * the method in klass representing a type var.
8902 * The type var is guaranteed to be a reference type in this
8905 if (!mini_is_gsharedvt_klass (constrained_class))
8906 g_assert (!cmethod->klass->valuetype);
8908 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8914 if (!cmethod || mono_loader_get_last_error ())
8916 if (!dont_verify && !cfg->skip_visibility) {
8917 MonoMethod *target_method = cil_method;
8918 if (method->is_inflated) {
8919 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8921 if (!mono_method_can_access_method (method_definition, target_method) &&
8922 !mono_method_can_access_method (method, cil_method))
8923 METHOD_ACCESS_FAILURE (method, cil_method);
8926 if (mono_security_core_clr_enabled ())
8927 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8929 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8930 /* MS.NET seems to silently convert this to a callvirt */
8935 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8936 * converts to a callvirt.
8938 * tests/bug-515884.il is an example of this behavior
8940 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8941 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8942 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8946 if (!cmethod->klass->inited)
8947 if (!mono_class_init (cmethod->klass))
8948 TYPE_LOAD_ERROR (cmethod->klass);
8950 fsig = mono_method_signature (cmethod);
8953 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8954 mini_class_is_system_array (cmethod->klass)) {
8955 array_rank = cmethod->klass->rank;
8956 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8957 direct_icall = TRUE;
8958 } else if (fsig->pinvoke) {
8959 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8960 fsig = mono_method_signature (wrapper);
8961 } else if (constrained_class) {
8963 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8967 /* See code below */
8968 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8969 MonoBasicBlock *tbb;
8971 GET_BBLOCK (cfg, tbb, ip + 5);
8972 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8974 * We want to extend the try block to cover the call, but we can't do it if the
8975 * call is made directly since it's followed by an exception check.
8977 direct_icall = FALSE;
8981 mono_save_token_info (cfg, image, token, cil_method);
8983 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8984 need_seq_point = TRUE;
8986 /* Don't support calls made using type arguments for now */
8988 if (cfg->gsharedvt) {
8989 if (mini_is_gsharedvt_signature (fsig))
8990 GSHAREDVT_FAILURE (*ip);
8994 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8995 g_assert_not_reached ();
8997 n = fsig->param_count + fsig->hasthis;
8999 if (!cfg->gshared && cmethod->klass->generic_container)
9003 g_assert (!mono_method_check_context_used (cmethod));
9007 //g_assert (!virtual || fsig->hasthis);
9011 if (constrained_class) {
9012 if (mini_is_gsharedvt_klass (constrained_class)) {
9013 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
9014 /* The 'Own method' case below */
9015 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
9016 /* 'The type parameter is instantiated as a reference type' case below. */
9018 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
9019 CHECK_CFG_EXCEPTION;
9026 * We have the `constrained.' prefix opcode.
9028 if (constrained_partial_call) {
9029 gboolean need_box = TRUE;
9032 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
9033 * called method is not known at compile time either. The called method could end up being
9034 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
9035 * to box the receiver.
9036 * A simple solution would be to box always and make a normal virtual call, but that would
9037 * be bad performance wise.
9039 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
9041 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
9046 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9047 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
9048 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9049 ins->klass = constrained_class;
9050 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9051 CHECK_CFG_EXCEPTION;
9052 } else if (need_box) {
9054 MonoBasicBlock *is_ref_bb, *end_bb;
9055 MonoInst *nonbox_call;
9058 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
9060 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
9061 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
9063 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9065 NEW_BBLOCK (cfg, is_ref_bb);
9066 NEW_BBLOCK (cfg, end_bb);
9068 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
9069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, MONO_GSHAREDVT_BOX_TYPE_REF);
9070 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
9073 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9075 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9078 MONO_START_BB (cfg, is_ref_bb);
9079 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9080 ins->klass = constrained_class;
9081 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9082 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9084 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9086 MONO_START_BB (cfg, end_bb);
9089 nonbox_call->dreg = ins->dreg;
9092 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
9093 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
9094 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9097 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9099 * The type parameter is instantiated as a valuetype,
9100 * but that type doesn't override the method we're
9101 * calling, so we need to box `this'.
9103 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9104 ins->klass = constrained_class;
9105 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9106 CHECK_CFG_EXCEPTION;
9107 } else if (!constrained_class->valuetype) {
9108 int dreg = alloc_ireg_ref (cfg);
9111 * The type parameter is instantiated as a reference
9112 * type. We have a managed pointer on the stack, so
9113 * we need to dereference it here.
9115 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9116 ins->type = STACK_OBJ;
9119 if (cmethod->klass->valuetype) {
9122 /* Interface method */
9125 mono_class_setup_vtable (constrained_class);
9126 CHECK_TYPELOAD (constrained_class);
9127 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9129 TYPE_LOAD_ERROR (constrained_class);
9130 slot = mono_method_get_vtable_slot (cmethod);
9132 TYPE_LOAD_ERROR (cmethod->klass);
9133 cmethod = constrained_class->vtable [ioffset + slot];
9135 if (cmethod->klass == mono_defaults.enum_class) {
9136 /* Enum implements some interfaces, so treat this as the first case */
9137 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9138 ins->klass = constrained_class;
9139 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9140 CHECK_CFG_EXCEPTION;
9145 constrained_class = NULL;
9148 if (check_call_signature (cfg, fsig, sp))
9151 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9152 delegate_invoke = TRUE;
9154 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9155 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9156 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9164 * If the callee is a shared method, then its static cctor
9165 * might not get called after the call was patched.
9167 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9168 emit_class_init (cfg, cmethod->klass);
9169 CHECK_TYPELOAD (cmethod->klass);
9172 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9175 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9177 context_used = mini_method_check_context_used (cfg, cmethod);
9179 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9180 /* Generic method interface
9181 calls are resolved via a
9182 helper function and don't
9184 if (!cmethod_context || !cmethod_context->method_inst)
9185 pass_imt_from_rgctx = TRUE;
9189 * If a shared method calls another
9190 * shared method then the caller must
9191 * have a generic sharing context
9192 * because the magic trampoline
9193 * requires it. FIXME: We shouldn't
9194 * have to force the vtable/mrgctx
9195 * variable here. Instead there
9196 * should be a flag in the cfg to
9197 * request a generic sharing context.
9200 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9201 mono_get_vtable_var (cfg);
9206 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9208 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9210 CHECK_TYPELOAD (cmethod->klass);
9211 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9216 g_assert (!vtable_arg);
9218 if (!cfg->compile_aot) {
9220 * emit_get_rgctx_method () calls mono_class_vtable () so check
9221 * for type load errors before.
9223 mono_class_setup_vtable (cmethod->klass);
9224 CHECK_TYPELOAD (cmethod->klass);
9227 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9229 /* !marshalbyref is needed to properly handle generic methods + remoting */
9230 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9231 MONO_METHOD_IS_FINAL (cmethod)) &&
9232 !mono_class_is_marshalbyref (cmethod->klass)) {
9239 if (pass_imt_from_rgctx) {
9240 g_assert (!pass_vtable);
9242 imt_arg = emit_get_rgctx_method (cfg, context_used,
9243 cmethod, MONO_RGCTX_INFO_METHOD);
9247 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9249 /* Calling virtual generic methods */
9250 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9251 !(MONO_METHOD_IS_FINAL (cmethod) &&
9252 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9253 fsig->generic_param_count &&
9254 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) &&
9256 MonoInst *this_temp, *this_arg_temp, *store;
9257 MonoInst *iargs [4];
9259 g_assert (fsig->is_inflated);
9261 /* Prevent inlining of methods that contain indirect calls */
9262 INLINE_FAILURE ("virtual generic call");
9264 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9265 GSHAREDVT_FAILURE (*ip);
9267 if (cfg->backend->have_generalized_imt_thunk && cfg->backend->gshared_supported && cmethod->wrapper_type == MONO_WRAPPER_NONE) {
9268 g_assert (!imt_arg);
9270 g_assert (cmethod->is_inflated);
9271 imt_arg = emit_get_rgctx_method (cfg, context_used,
9272 cmethod, MONO_RGCTX_INFO_METHOD);
9273 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9275 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9276 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9277 MONO_ADD_INS (cfg->cbb, store);
9279 /* FIXME: This should be a managed pointer */
9280 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9282 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9283 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9284 cmethod, MONO_RGCTX_INFO_METHOD);
9285 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9286 addr = mono_emit_jit_icall (cfg,
9287 mono_helper_compile_generic_method, iargs);
9289 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9291 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9298 * Implement a workaround for the inherent races involved in locking:
9304 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9305 * try block, the Exit () won't be executed, see:
9306 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9307 * To work around this, we extend such try blocks to include the last x bytes
9308 * of the Monitor.Enter () call.
9310 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9311 MonoBasicBlock *tbb;
9313 GET_BBLOCK (cfg, tbb, ip + 5);
9315 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9316 * from Monitor.Enter like ArgumentNullException.
9318 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9319 /* Mark this bblock as needing to be extended */
9320 tbb->extend_try_block = TRUE;
9324 /* Conversion to a JIT intrinsic */
9325 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9326 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9327 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9334 if ((cfg->opt & MONO_OPT_INLINE) &&
9335 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9336 mono_method_check_inlining (cfg, cmethod)) {
9338 gboolean always = FALSE;
9340 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9341 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9342 /* Prevent inlining of methods that call wrappers */
9343 INLINE_FAILURE ("wrapper call");
9344 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9348 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9350 cfg->real_offset += 5;
9352 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9353 /* *sp is already set by inline_method */
9358 inline_costs += costs;
9364 /* Tail recursion elimination */
9365 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9366 gboolean has_vtargs = FALSE;
9369 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9370 INLINE_FAILURE ("tail call");
9372 /* keep it simple */
9373 for (i = fsig->param_count - 1; i >= 0; i--) {
9374 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9379 for (i = 0; i < n; ++i)
9380 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9381 MONO_INST_NEW (cfg, ins, OP_BR);
9382 MONO_ADD_INS (cfg->cbb, ins);
9383 tblock = start_bblock->out_bb [0];
9384 link_bblock (cfg, cfg->cbb, tblock);
9385 ins->inst_target_bb = tblock;
9386 start_new_bblock = 1;
9388 /* skip the CEE_RET, too */
9389 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9396 inline_costs += 10 * num_calls++;
9399 * Making generic calls out of gsharedvt methods.
9400 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9401 * patching gshared method addresses into a gsharedvt method.
9403 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9404 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9405 MonoRgctxInfoType info_type;
9408 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9409 //GSHAREDVT_FAILURE (*ip);
9410 // disable for possible remoting calls
9411 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9412 GSHAREDVT_FAILURE (*ip);
9413 if (fsig->generic_param_count) {
9414 /* virtual generic call */
9415 g_assert (!imt_arg);
9416 /* Same as the virtual generic case above */
9417 imt_arg = emit_get_rgctx_method (cfg, context_used,
9418 cmethod, MONO_RGCTX_INFO_METHOD);
9419 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9421 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9422 /* This can happen when we call a fully instantiated iface method */
9423 imt_arg = emit_get_rgctx_method (cfg, context_used,
9424 cmethod, MONO_RGCTX_INFO_METHOD);
9429 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9430 keep_this_alive = sp [0];
9432 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9433 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9435 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9436 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9438 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9442 /* Generic sharing */
9445 * Use this if the callee is gsharedvt sharable too, since
9446 * at runtime we might find an instantiation so the call cannot
9447 * be patched (the 'no_patch' code path in mini-trampolines.c).
9449 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9450 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9451 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9452 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9453 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9454 INLINE_FAILURE ("gshared");
9456 g_assert (cfg->gshared && cmethod);
9460 * We are compiling a call to a
9461 * generic method from shared code,
9462 * which means that we have to look up
9463 * the method in the rgctx and do an
9467 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9469 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9470 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9474 /* Direct calls to icalls */
9476 MonoMethod *wrapper;
9479 /* Inline the wrapper */
9480 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9482 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9483 g_assert (costs > 0);
9484 cfg->real_offset += 5;
9486 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9487 /* *sp is already set by inline_method */
9492 inline_costs += costs;
9501 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9502 MonoInst *val = sp [fsig->param_count];
9504 if (val->type == STACK_OBJ) {
9505 MonoInst *iargs [2];
9510 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9513 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9514 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9515 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9516 emit_write_barrier (cfg, addr, val);
9517 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9518 GSHAREDVT_FAILURE (*ip);
9519 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9520 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9522 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9523 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9524 if (!cmethod->klass->element_class->valuetype && !readonly)
9525 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9526 CHECK_TYPELOAD (cmethod->klass);
9529 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9532 g_assert_not_reached ();
9539 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9543 /* Tail prefix / tail call optimization */
9545 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9546 /* FIXME: runtime generic context pointer for jumps? */
9547 /* FIXME: handle this for generic sharing eventually */
9548 if ((ins_flag & MONO_INST_TAILCALL) &&
9549 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9550 supported_tail_call = TRUE;
9552 if (supported_tail_call) {
9555 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9556 INLINE_FAILURE ("tail call");
9558 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9560 if (cfg->backend->have_op_tail_call) {
9561 /* Handle tail calls similarly to normal calls */
9564 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9566 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9567 call->tail_call = TRUE;
9568 call->method = cmethod;
9569 call->signature = mono_method_signature (cmethod);
9572 * We implement tail calls by storing the actual arguments into the
9573 * argument variables, then emitting a CEE_JMP.
9575 for (i = 0; i < n; ++i) {
9576 /* Prevent argument from being register allocated */
9577 arg_array [i]->flags |= MONO_INST_VOLATILE;
9578 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9580 ins = (MonoInst*)call;
9581 ins->inst_p0 = cmethod;
9582 ins->inst_p1 = arg_array [0];
9583 MONO_ADD_INS (cfg->cbb, ins);
9584 link_bblock (cfg, cfg->cbb, end_bblock);
9585 start_new_bblock = 1;
9587 // FIXME: Eliminate unreachable epilogs
9590 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9591 * only reachable from this call.
9593 GET_BBLOCK (cfg, tblock, ip + 5);
9594 if (tblock == cfg->cbb || tblock->in_count == 0)
9603 * Synchronized wrappers.
9604 * It's hard to determine where to replace a method with its synchronized
9605 * wrapper without causing an infinite recursion. The current solution is
9606 * to add the synchronized wrapper in the trampolines, and to
9607 * change the called method to a dummy wrapper, and resolve that wrapper
9608 * to the real method in mono_jit_compile_method ().
9610 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9611 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9612 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9613 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9617 * Interface calls in llvm-only mode are complicated because the callee might need an rgctx arg,
9618 * (i.e. it's a vtype method), and there is no way for the caller to know this at compile time.
9619 * So we make resolve_iface_call return the rgctx, and do two calls with different signatures
9620 * based on whether there is an rgctx or not.
9622 if (cfg->llvm_only && virtual && cmethod && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9623 MonoInst *args_buf [16], *icall_args [16];
9625 MonoBasicBlock *rgctx_bb, *end_bb;
9626 MonoInst *call1, *call2, *call_target;
9627 MonoMethodSignature *rgctx_sig;
9628 int rgctx_reg, tmp_reg;
9630 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9632 NEW_BBLOCK (cfg, rgctx_bb);
9633 NEW_BBLOCK (cfg, end_bb);
9635 // FIXME: Optimize this
9637 guint32 imt_slot = mono_method_get_imt_slot (cmethod);
9639 icall_args [0] = sp [0];
9640 EMIT_NEW_ICONST (cfg, icall_args [1], imt_slot);
9642 icall_args [2] = imt_arg;
9644 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_METHODCONST, cmethod);
9645 icall_args [2] = ins;
9648 rgctx_reg = alloc_preg (cfg);
9649 MONO_EMIT_NEW_PCONST (cfg, rgctx_reg, NULL);
9650 EMIT_NEW_VARLOADA_VREG (cfg, icall_args [3], rgctx_reg, &mono_defaults.int_class->byval_arg);
9651 //EMIT_NEW_PCONST (cfg, icall_args [3], NULL);
9653 call_target = mono_emit_jit_icall (cfg, mono_resolve_iface_call, icall_args);
9655 // FIXME: Only do this if needed (generic calls)
9657 // Check whether to pass an rgctx
9658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
9659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, rgctx_bb);
9660 /* Non rgctx case */
9661 call1 = mono_emit_calli (cfg, fsig, sp, call_target, NULL, vtable_arg);
9662 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9664 MONO_START_BB (cfg, rgctx_bb);
9665 /* Make a call with an rgctx */
9666 if (fsig->param_count + 2 < 16)
9669 args = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (fsig->param_count + 2));
9671 for (i = 0; i < fsig->param_count; ++i)
9672 args [i + 1] = sp [i + 1];
9673 tmp_reg = alloc_preg (cfg);
9674 EMIT_NEW_UNALU (cfg, args [fsig->param_count + 1], OP_MOVE, tmp_reg, rgctx_reg);
9675 rgctx_sig = sig_to_rgctx_sig (fsig);
9676 call2 = mono_emit_calli (cfg, rgctx_sig, args, call_target, NULL, NULL);
9677 call2->dreg = call1->dreg;
9678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
9680 MONO_START_BB (cfg, end_bb);
9686 INLINE_FAILURE ("call");
9687 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9688 imt_arg, vtable_arg);
9690 if (tail_call && !cfg->llvm_only) {
9691 link_bblock (cfg, cfg->cbb, end_bblock);
9692 start_new_bblock = 1;
9694 // FIXME: Eliminate unreachable epilogs
9697 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9698 * only reachable from this call.
9700 GET_BBLOCK (cfg, tblock, ip + 5);
9701 if (tblock == cfg->cbb || tblock->in_count == 0)
9708 /* End of call, INS should contain the result of the call, if any */
9710 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9713 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9718 if (keep_this_alive) {
9719 MonoInst *dummy_use;
9721 /* See mono_emit_method_call_full () */
9722 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9725 CHECK_CFG_EXCEPTION;
9729 g_assert (*ip == CEE_RET);
9733 constrained_class = NULL;
9735 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9739 if (cfg->method != method) {
9740 /* return from inlined method */
9742 * If in_count == 0, that means the ret is unreachable due to
9743 * being preceded by a throw. In that case, inline_method () will
9744 * handle setting the return value
9745 * (test case: test_0_inline_throw ()).
9747 if (return_var && cfg->cbb->in_count) {
9748 MonoType *ret_type = mono_method_signature (method)->ret;
9754 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9757 //g_assert (returnvar != -1);
9758 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9759 cfg->ret_var_set = TRUE;
9762 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9764 if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
9768 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9770 if (seq_points && !sym_seq_points) {
9772 * Place a seq point here too even though the IL stack is not
9773 * empty, so a step over on
9776 * will work correctly.
9778 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9779 MONO_ADD_INS (cfg->cbb, ins);
9782 g_assert (!return_var);
9786 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9789 emit_setret (cfg, *sp);
9792 if (sp != stack_start)
9794 MONO_INST_NEW (cfg, ins, OP_BR);
9796 ins->inst_target_bb = end_bblock;
9797 MONO_ADD_INS (cfg->cbb, ins);
9798 link_bblock (cfg, cfg->cbb, end_bblock);
9799 start_new_bblock = 1;
9803 MONO_INST_NEW (cfg, ins, OP_BR);
9805 target = ip + 1 + (signed char)(*ip);
9807 GET_BBLOCK (cfg, tblock, target);
9808 link_bblock (cfg, cfg->cbb, tblock);
9809 ins->inst_target_bb = tblock;
9810 if (sp != stack_start) {
9811 handle_stack_args (cfg, stack_start, sp - stack_start);
9813 CHECK_UNVERIFIABLE (cfg);
9815 MONO_ADD_INS (cfg->cbb, ins);
9816 start_new_bblock = 1;
9817 inline_costs += BRANCH_COST;
9831 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9833 target = ip + 1 + *(signed char*)ip;
9839 inline_costs += BRANCH_COST;
9843 MONO_INST_NEW (cfg, ins, OP_BR);
9846 target = ip + 4 + (gint32)read32(ip);
9848 GET_BBLOCK (cfg, tblock, target);
9849 link_bblock (cfg, cfg->cbb, tblock);
9850 ins->inst_target_bb = tblock;
9851 if (sp != stack_start) {
9852 handle_stack_args (cfg, stack_start, sp - stack_start);
9854 CHECK_UNVERIFIABLE (cfg);
9857 MONO_ADD_INS (cfg->cbb, ins);
9859 start_new_bblock = 1;
9860 inline_costs += BRANCH_COST;
9867 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9868 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9869 guint32 opsize = is_short ? 1 : 4;
9871 CHECK_OPSIZE (opsize);
9873 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9876 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9881 GET_BBLOCK (cfg, tblock, target);
9882 link_bblock (cfg, cfg->cbb, tblock);
9883 GET_BBLOCK (cfg, tblock, ip);
9884 link_bblock (cfg, cfg->cbb, tblock);
9886 if (sp != stack_start) {
9887 handle_stack_args (cfg, stack_start, sp - stack_start);
9888 CHECK_UNVERIFIABLE (cfg);
9891 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9892 cmp->sreg1 = sp [0]->dreg;
9893 type_from_op (cfg, cmp, sp [0], NULL);
9896 #if SIZEOF_REGISTER == 4
9897 if (cmp->opcode == OP_LCOMPARE_IMM) {
9898 /* Convert it to OP_LCOMPARE */
9899 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9900 ins->type = STACK_I8;
9901 ins->dreg = alloc_dreg (cfg, STACK_I8);
9903 MONO_ADD_INS (cfg->cbb, ins);
9904 cmp->opcode = OP_LCOMPARE;
9905 cmp->sreg2 = ins->dreg;
9908 MONO_ADD_INS (cfg->cbb, cmp);
9910 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9911 type_from_op (cfg, ins, sp [0], NULL);
9912 MONO_ADD_INS (cfg->cbb, ins);
9913 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9914 GET_BBLOCK (cfg, tblock, target);
9915 ins->inst_true_bb = tblock;
9916 GET_BBLOCK (cfg, tblock, ip);
9917 ins->inst_false_bb = tblock;
9918 start_new_bblock = 2;
9921 inline_costs += BRANCH_COST;
9936 MONO_INST_NEW (cfg, ins, *ip);
9938 target = ip + 4 + (gint32)read32(ip);
9944 inline_costs += BRANCH_COST;
9948 MonoBasicBlock **targets;
9949 MonoBasicBlock *default_bblock;
9950 MonoJumpInfoBBTable *table;
9951 int offset_reg = alloc_preg (cfg);
9952 int target_reg = alloc_preg (cfg);
9953 int table_reg = alloc_preg (cfg);
9954 int sum_reg = alloc_preg (cfg);
9955 gboolean use_op_switch;
9959 n = read32 (ip + 1);
9962 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9966 CHECK_OPSIZE (n * sizeof (guint32));
9967 target = ip + n * sizeof (guint32);
9969 GET_BBLOCK (cfg, default_bblock, target);
9970 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9972 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9973 for (i = 0; i < n; ++i) {
9974 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9975 targets [i] = tblock;
9976 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9980 if (sp != stack_start) {
9982 * Link the current bb with the targets as well, so handle_stack_args
9983 * will set their in_stack correctly.
9985 link_bblock (cfg, cfg->cbb, default_bblock);
9986 for (i = 0; i < n; ++i)
9987 link_bblock (cfg, cfg->cbb, targets [i]);
9989 handle_stack_args (cfg, stack_start, sp - stack_start);
9991 CHECK_UNVERIFIABLE (cfg);
9993 /* Undo the links */
9994 mono_unlink_bblock (cfg, cfg->cbb, default_bblock);
9995 for (i = 0; i < n; ++i)
9996 mono_unlink_bblock (cfg, cfg->cbb, targets [i]);
9999 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
10000 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
10002 for (i = 0; i < n; ++i)
10003 link_bblock (cfg, cfg->cbb, targets [i]);
10005 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
10006 table->table = targets;
10007 table->table_size = n;
10009 use_op_switch = FALSE;
10011 /* ARM implements SWITCH statements differently */
10012 /* FIXME: Make it use the generic implementation */
10013 if (!cfg->compile_aot)
10014 use_op_switch = TRUE;
10017 if (COMPILE_LLVM (cfg))
10018 use_op_switch = TRUE;
10020 cfg->cbb->has_jump_table = 1;
10022 if (use_op_switch) {
10023 MONO_INST_NEW (cfg, ins, OP_SWITCH);
10024 ins->sreg1 = src1->dreg;
10025 ins->inst_p0 = table;
10026 ins->inst_many_bb = targets;
10027 ins->klass = GUINT_TO_POINTER (n);
10028 MONO_ADD_INS (cfg->cbb, ins);
10030 if (sizeof (gpointer) == 8)
10031 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
10033 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
10035 #if SIZEOF_REGISTER == 8
10036 /* The upper word might not be zero, and we add it to a 64 bit address later */
10037 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
10040 if (cfg->compile_aot) {
10041 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
10043 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
10044 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
10045 ins->inst_p0 = table;
10046 ins->dreg = table_reg;
10047 MONO_ADD_INS (cfg->cbb, ins);
10050 /* FIXME: Use load_memindex */
10051 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
10052 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
10053 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
10055 start_new_bblock = 1;
10056 inline_costs += (BRANCH_COST * 2);
10069 case CEE_LDIND_REF:
10076 dreg = alloc_freg (cfg);
10079 dreg = alloc_lreg (cfg);
10081 case CEE_LDIND_REF:
10082 dreg = alloc_ireg_ref (cfg);
10085 dreg = alloc_preg (cfg);
10088 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
10089 ins->type = ldind_type [*ip - CEE_LDIND_I1];
10090 if (*ip == CEE_LDIND_R4)
10091 ins->type = cfg->r4_stack_type;
10092 ins->flags |= ins_flag;
10093 MONO_ADD_INS (cfg->cbb, ins);
10095 if (ins_flag & MONO_INST_VOLATILE) {
10096 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10097 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10102 case CEE_STIND_REF:
10113 if (ins_flag & MONO_INST_VOLATILE) {
10114 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10115 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10118 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
10119 ins->flags |= ins_flag;
10122 MONO_ADD_INS (cfg->cbb, ins);
10124 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10125 emit_write_barrier (cfg, sp [0], sp [1]);
10134 MONO_INST_NEW (cfg, ins, (*ip));
10136 ins->sreg1 = sp [0]->dreg;
10137 ins->sreg2 = sp [1]->dreg;
10138 type_from_op (cfg, ins, sp [0], sp [1]);
10140 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10142 /* Use the immediate opcodes if possible */
10143 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10144 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10145 if (imm_opcode != -1) {
10146 ins->opcode = imm_opcode;
10147 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10150 NULLIFY_INS (sp [1]);
10154 MONO_ADD_INS ((cfg)->cbb, (ins));
10156 *sp++ = mono_decompose_opcode (cfg, ins);
10173 MONO_INST_NEW (cfg, ins, (*ip));
10175 ins->sreg1 = sp [0]->dreg;
10176 ins->sreg2 = sp [1]->dreg;
10177 type_from_op (cfg, ins, sp [0], sp [1]);
10179 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10180 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10182 /* FIXME: Pass opcode to is_inst_imm */
10184 /* Use the immediate opcodes if possible */
10185 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10188 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10189 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10190 /* Keep emulated opcodes which are optimized away later */
10191 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10192 imm_opcode = mono_op_to_op_imm (ins->opcode);
10195 if (imm_opcode != -1) {
10196 ins->opcode = imm_opcode;
10197 if (sp [1]->opcode == OP_I8CONST) {
10198 #if SIZEOF_REGISTER == 8
10199 ins->inst_imm = sp [1]->inst_l;
10201 ins->inst_ls_word = sp [1]->inst_ls_word;
10202 ins->inst_ms_word = sp [1]->inst_ms_word;
10206 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10209 /* Might be followed by an instruction added by add_widen_op */
10210 if (sp [1]->next == NULL)
10211 NULLIFY_INS (sp [1]);
10214 MONO_ADD_INS ((cfg)->cbb, (ins));
10216 *sp++ = mono_decompose_opcode (cfg, ins);
10229 case CEE_CONV_OVF_I8:
10230 case CEE_CONV_OVF_U8:
10231 case CEE_CONV_R_UN:
10234 /* Special case this earlier so we have long constants in the IR */
10235 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10236 int data = sp [-1]->inst_c0;
10237 sp [-1]->opcode = OP_I8CONST;
10238 sp [-1]->type = STACK_I8;
10239 #if SIZEOF_REGISTER == 8
10240 if ((*ip) == CEE_CONV_U8)
10241 sp [-1]->inst_c0 = (guint32)data;
10243 sp [-1]->inst_c0 = data;
10245 sp [-1]->inst_ls_word = data;
10246 if ((*ip) == CEE_CONV_U8)
10247 sp [-1]->inst_ms_word = 0;
10249 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10251 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10258 case CEE_CONV_OVF_I4:
10259 case CEE_CONV_OVF_I1:
10260 case CEE_CONV_OVF_I2:
10261 case CEE_CONV_OVF_I:
10262 case CEE_CONV_OVF_U:
10265 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10266 ADD_UNOP (CEE_CONV_OVF_I8);
10273 case CEE_CONV_OVF_U1:
10274 case CEE_CONV_OVF_U2:
10275 case CEE_CONV_OVF_U4:
10278 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10279 ADD_UNOP (CEE_CONV_OVF_U8);
10286 case CEE_CONV_OVF_I1_UN:
10287 case CEE_CONV_OVF_I2_UN:
10288 case CEE_CONV_OVF_I4_UN:
10289 case CEE_CONV_OVF_I8_UN:
10290 case CEE_CONV_OVF_U1_UN:
10291 case CEE_CONV_OVF_U2_UN:
10292 case CEE_CONV_OVF_U4_UN:
10293 case CEE_CONV_OVF_U8_UN:
10294 case CEE_CONV_OVF_I_UN:
10295 case CEE_CONV_OVF_U_UN:
10302 CHECK_CFG_EXCEPTION;
10306 case CEE_ADD_OVF_UN:
10308 case CEE_MUL_OVF_UN:
10310 case CEE_SUB_OVF_UN:
10316 GSHAREDVT_FAILURE (*ip);
10319 token = read32 (ip + 1);
10320 klass = mini_get_class (method, token, generic_context);
10321 CHECK_TYPELOAD (klass);
10323 if (generic_class_is_reference_type (cfg, klass)) {
10324 MonoInst *store, *load;
10325 int dreg = alloc_ireg_ref (cfg);
10327 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10328 load->flags |= ins_flag;
10329 MONO_ADD_INS (cfg->cbb, load);
10331 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10332 store->flags |= ins_flag;
10333 MONO_ADD_INS (cfg->cbb, store);
10335 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10336 emit_write_barrier (cfg, sp [0], sp [1]);
10338 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10344 int loc_index = -1;
10350 token = read32 (ip + 1);
10351 klass = mini_get_class (method, token, generic_context);
10352 CHECK_TYPELOAD (klass);
10354 /* Optimize the common ldobj+stloc combination */
10357 loc_index = ip [6];
10364 loc_index = ip [5] - CEE_STLOC_0;
10371 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10372 CHECK_LOCAL (loc_index);
10374 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10375 ins->dreg = cfg->locals [loc_index]->dreg;
10376 ins->flags |= ins_flag;
10379 if (ins_flag & MONO_INST_VOLATILE) {
10380 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10381 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10387 /* Optimize the ldobj+stobj combination */
10388 /* The reference case ends up being a load+store anyway */
10389 /* Skip this if the operation is volatile. */
10390 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10395 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10402 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10403 ins->flags |= ins_flag;
10406 if (ins_flag & MONO_INST_VOLATILE) {
10407 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10408 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10417 CHECK_STACK_OVF (1);
10419 n = read32 (ip + 1);
10421 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10422 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10423 ins->type = STACK_OBJ;
10426 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10427 MonoInst *iargs [1];
10428 char *str = mono_method_get_wrapper_data (method, n);
10430 if (cfg->compile_aot)
10431 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10433 EMIT_NEW_PCONST (cfg, iargs [0], str);
10434 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10436 if (cfg->opt & MONO_OPT_SHARED) {
10437 MonoInst *iargs [3];
10439 if (cfg->compile_aot) {
10440 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10442 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10443 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10444 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10445 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10446 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10448 if (cfg->cbb->out_of_line) {
10449 MonoInst *iargs [2];
10451 if (image == mono_defaults.corlib) {
10453 * Avoid relocations in AOT and save some space by using a
10454 * version of helper_ldstr specialized to mscorlib.
10456 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10457 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10459 /* Avoid creating the string object */
10460 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10461 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10462 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10466 if (cfg->compile_aot) {
10467 NEW_LDSTRCONST (cfg, ins, image, n);
10469 MONO_ADD_INS (cfg->cbb, ins);
10472 NEW_PCONST (cfg, ins, NULL);
10473 ins->type = STACK_OBJ;
10474 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10476 OUT_OF_MEMORY_FAILURE;
10479 MONO_ADD_INS (cfg->cbb, ins);
10488 MonoInst *iargs [2];
10489 MonoMethodSignature *fsig;
10492 MonoInst *vtable_arg = NULL;
10495 token = read32 (ip + 1);
10496 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10497 if (!cmethod || mono_loader_get_last_error ())
10499 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10502 mono_save_token_info (cfg, image, token, cmethod);
10504 if (!mono_class_init (cmethod->klass))
10505 TYPE_LOAD_ERROR (cmethod->klass);
10507 context_used = mini_method_check_context_used (cfg, cmethod);
10509 if (mono_security_core_clr_enabled ())
10510 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10512 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10513 emit_class_init (cfg, cmethod->klass);
10514 CHECK_TYPELOAD (cmethod->klass);
10518 if (cfg->gsharedvt) {
10519 if (mini_is_gsharedvt_variable_signature (sig))
10520 GSHAREDVT_FAILURE (*ip);
10524 n = fsig->param_count;
10528 * Generate smaller code for the common newobj <exception> instruction in
10529 * argument checking code.
10531 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10532 is_exception_class (cmethod->klass) && n <= 2 &&
10533 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10534 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10535 MonoInst *iargs [3];
10539 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10542 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10545 iargs [1] = sp [0];
10546 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10549 iargs [1] = sp [0];
10550 iargs [2] = sp [1];
10551 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10554 g_assert_not_reached ();
10562 /* move the args to allow room for 'this' in the first position */
10568 /* check_call_signature () requires sp[0] to be set */
10569 this_ins.type = STACK_OBJ;
10570 sp [0] = &this_ins;
10571 if (check_call_signature (cfg, fsig, sp))
10576 if (mini_class_is_system_array (cmethod->klass)) {
10577 *sp = emit_get_rgctx_method (cfg, context_used,
10578 cmethod, MONO_RGCTX_INFO_METHOD);
10580 /* Avoid varargs in the common case */
10581 if (fsig->param_count == 1)
10582 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10583 else if (fsig->param_count == 2)
10584 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10585 else if (fsig->param_count == 3)
10586 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10587 else if (fsig->param_count == 4)
10588 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10590 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10591 } else if (cmethod->string_ctor) {
10592 g_assert (!context_used);
10593 g_assert (!vtable_arg);
10594 /* we simply pass a null pointer */
10595 EMIT_NEW_PCONST (cfg, *sp, NULL);
10596 /* now call the string ctor */
10597 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10599 if (cmethod->klass->valuetype) {
10600 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10601 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10602 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10607 * The code generated by mini_emit_virtual_call () expects
10608 * iargs [0] to be a boxed instance, but luckily the vcall
10609 * will be transformed into a normal call there.
10611 } else if (context_used) {
10612 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10615 MonoVTable *vtable = NULL;
10617 if (!cfg->compile_aot)
10618 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10619 CHECK_TYPELOAD (cmethod->klass);
10622 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10623 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10624 * As a workaround, we call class cctors before allocating objects.
10626 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10627 emit_class_init (cfg, cmethod->klass);
10628 if (cfg->verbose_level > 2)
10629 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10630 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10633 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10636 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10639 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10641 /* Now call the actual ctor */
10642 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10643 CHECK_CFG_EXCEPTION;
10646 if (alloc == NULL) {
10648 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10649 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10657 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10658 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10661 case CEE_CASTCLASS:
10665 token = read32 (ip + 1);
10666 klass = mini_get_class (method, token, generic_context);
10667 CHECK_TYPELOAD (klass);
10668 if (sp [0]->type != STACK_OBJ)
10671 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10672 CHECK_CFG_EXCEPTION;
10681 token = read32 (ip + 1);
10682 klass = mini_get_class (method, token, generic_context);
10683 CHECK_TYPELOAD (klass);
10684 if (sp [0]->type != STACK_OBJ)
10687 context_used = mini_class_check_context_used (cfg, klass);
10689 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10690 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10691 MonoInst *args [3];
10698 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10701 idx = get_castclass_cache_idx (cfg);
10702 args [2] = emit_runtime_constant (cfg, MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10704 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10707 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10708 MonoMethod *mono_isinst;
10709 MonoInst *iargs [1];
10712 mono_isinst = mono_marshal_get_isinst (klass);
10713 iargs [0] = sp [0];
10715 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10716 iargs, ip, cfg->real_offset, TRUE);
10717 CHECK_CFG_EXCEPTION;
10718 g_assert (costs > 0);
10721 cfg->real_offset += 5;
10725 inline_costs += costs;
10728 ins = handle_isinst (cfg, klass, *sp, context_used);
10729 CHECK_CFG_EXCEPTION;
10735 case CEE_UNBOX_ANY: {
10736 MonoInst *res, *addr;
10741 token = read32 (ip + 1);
10742 klass = mini_get_class (method, token, generic_context);
10743 CHECK_TYPELOAD (klass);
10745 mono_save_token_info (cfg, image, token, klass);
10747 context_used = mini_class_check_context_used (cfg, klass);
10749 if (mini_is_gsharedvt_klass (klass)) {
10750 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10752 } else if (generic_class_is_reference_type (cfg, klass)) {
10753 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10754 CHECK_CFG_EXCEPTION;
10755 } else if (mono_class_is_nullable (klass)) {
10756 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10758 addr = handle_unbox (cfg, klass, sp, context_used);
10760 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10771 MonoClass *enum_class;
10772 MonoMethod *has_flag;
10778 token = read32 (ip + 1);
10779 klass = mini_get_class (method, token, generic_context);
10780 CHECK_TYPELOAD (klass);
10782 mono_save_token_info (cfg, image, token, klass);
10784 context_used = mini_class_check_context_used (cfg, klass);
10786 if (generic_class_is_reference_type (cfg, klass)) {
10792 if (klass == mono_defaults.void_class)
10794 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10796 /* frequent check in generic code: box (struct), brtrue */
10801 * <push int/long ptr>
10804 * constrained. MyFlags
10805 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10807 * If we find this sequence and the operand types on box and constrained
10808 * are equal, we can emit a specialized instruction sequence instead of
10809 * the very slow HasFlag () call.
10811 if ((cfg->opt & MONO_OPT_INTRINS) &&
10812 /* Cheap checks first. */
10813 ip + 5 + 6 + 5 < end &&
10814 ip [5] == CEE_PREFIX1 &&
10815 ip [6] == CEE_CONSTRAINED_ &&
10816 ip [11] == CEE_CALLVIRT &&
10817 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10818 mono_class_is_enum (klass) &&
10819 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10820 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10821 has_flag->klass == mono_defaults.enum_class &&
10822 !strcmp (has_flag->name, "HasFlag") &&
10823 has_flag->signature->hasthis &&
10824 has_flag->signature->param_count == 1) {
10825 CHECK_TYPELOAD (enum_class);
10827 if (enum_class == klass) {
10828 MonoInst *enum_this, *enum_flag;
10833 enum_this = sp [0];
10834 enum_flag = sp [1];
10836 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10841 // FIXME: LLVM can't handle the inconsistent bb linking
10842 if (!mono_class_is_nullable (klass) &&
10843 !mini_is_gsharedvt_klass (klass) &&
10844 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10845 (ip [5] == CEE_BRTRUE ||
10846 ip [5] == CEE_BRTRUE_S ||
10847 ip [5] == CEE_BRFALSE ||
10848 ip [5] == CEE_BRFALSE_S)) {
10849 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10851 MonoBasicBlock *true_bb, *false_bb;
10855 if (cfg->verbose_level > 3) {
10856 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10857 printf ("<box+brtrue opt>\n");
10862 case CEE_BRFALSE_S:
10865 target = ip + 1 + (signed char)(*ip);
10872 target = ip + 4 + (gint)(read32 (ip));
10876 g_assert_not_reached ();
10880 * We need to link both bblocks, since it is needed for handling stack
10881 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10882 * Branching to only one of them would lead to inconsistencies, so
10883 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10885 GET_BBLOCK (cfg, true_bb, target);
10886 GET_BBLOCK (cfg, false_bb, ip);
10888 mono_link_bblock (cfg, cfg->cbb, true_bb);
10889 mono_link_bblock (cfg, cfg->cbb, false_bb);
10891 if (sp != stack_start) {
10892 handle_stack_args (cfg, stack_start, sp - stack_start);
10894 CHECK_UNVERIFIABLE (cfg);
10897 if (COMPILE_LLVM (cfg)) {
10898 dreg = alloc_ireg (cfg);
10899 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10902 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10904 /* The JIT can't eliminate the iconst+compare */
10905 MONO_INST_NEW (cfg, ins, OP_BR);
10906 ins->inst_target_bb = is_true ? true_bb : false_bb;
10907 MONO_ADD_INS (cfg->cbb, ins);
10910 start_new_bblock = 1;
10914 *sp++ = handle_box (cfg, val, klass, context_used);
10916 CHECK_CFG_EXCEPTION;
10925 token = read32 (ip + 1);
10926 klass = mini_get_class (method, token, generic_context);
10927 CHECK_TYPELOAD (klass);
10929 mono_save_token_info (cfg, image, token, klass);
10931 context_used = mini_class_check_context_used (cfg, klass);
10933 if (mono_class_is_nullable (klass)) {
10936 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10937 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10941 ins = handle_unbox (cfg, klass, sp, context_used);
10954 MonoClassField *field;
10955 #ifndef DISABLE_REMOTING
10959 gboolean is_instance;
10961 gpointer addr = NULL;
10962 gboolean is_special_static;
10964 MonoInst *store_val = NULL;
10965 MonoInst *thread_ins;
10968 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10970 if (op == CEE_STFLD) {
10973 store_val = sp [1];
10978 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10980 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10983 if (op == CEE_STSFLD) {
10986 store_val = sp [0];
10991 token = read32 (ip + 1);
10992 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10993 field = mono_method_get_wrapper_data (method, token);
10994 klass = field->parent;
10997 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
11000 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
11001 FIELD_ACCESS_FAILURE (method, field);
11002 mono_class_init (klass);
11004 /* if the class is Critical then transparent code cannot access its fields */
11005 if (!is_instance && mono_security_core_clr_enabled ())
11006 ensure_method_is_allowed_to_access_field (cfg, method, field);
11008 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
11009 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
11010 if (mono_security_core_clr_enabled ())
11011 ensure_method_is_allowed_to_access_field (cfg, method, field);
11014 ftype = mono_field_get_type (field);
11017 * LDFLD etc. is usable on static fields as well, so convert those cases to
11020 if (is_instance && ftype->attrs & FIELD_ATTRIBUTE_STATIC) {
11032 g_assert_not_reached ();
11034 is_instance = FALSE;
11037 context_used = mini_class_check_context_used (cfg, klass);
11039 /* INSTANCE CASE */
11041 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
11042 if (op == CEE_STFLD) {
11043 if (target_type_is_incompatible (cfg, field->type, sp [1]))
11045 #ifndef DISABLE_REMOTING
11046 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
11047 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
11048 MonoInst *iargs [5];
11050 GSHAREDVT_FAILURE (op);
11052 iargs [0] = sp [0];
11053 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11054 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11055 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
11057 iargs [4] = sp [1];
11059 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11060 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
11061 iargs, ip, cfg->real_offset, TRUE);
11062 CHECK_CFG_EXCEPTION;
11063 g_assert (costs > 0);
11065 cfg->real_offset += 5;
11067 inline_costs += costs;
11069 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
11076 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11078 if (mini_is_gsharedvt_klass (klass)) {
11079 MonoInst *offset_ins;
11081 context_used = mini_class_check_context_used (cfg, klass);
11083 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11084 /* The value is offset by 1 */
11085 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11086 dreg = alloc_ireg_mp (cfg);
11087 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11088 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
11089 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
11091 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
11093 if (sp [0]->opcode != OP_LDADDR)
11094 store->flags |= MONO_INST_FAULT;
11096 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
11097 /* insert call to write barrier */
11101 dreg = alloc_ireg_mp (cfg);
11102 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11103 emit_write_barrier (cfg, ptr, sp [1]);
11106 store->flags |= ins_flag;
11113 #ifndef DISABLE_REMOTING
11114 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11115 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11116 MonoInst *iargs [4];
11118 GSHAREDVT_FAILURE (op);
11120 iargs [0] = sp [0];
11121 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11122 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11123 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11124 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11125 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11126 iargs, ip, cfg->real_offset, TRUE);
11127 CHECK_CFG_EXCEPTION;
11128 g_assert (costs > 0);
11130 cfg->real_offset += 5;
11134 inline_costs += costs;
11136 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11142 if (sp [0]->type == STACK_VTYPE) {
11145 /* Have to compute the address of the variable */
11147 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11149 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11151 g_assert (var->klass == klass);
11153 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11157 if (op == CEE_LDFLDA) {
11158 if (sp [0]->type == STACK_OBJ) {
11159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11160 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11163 dreg = alloc_ireg_mp (cfg);
11165 if (mini_is_gsharedvt_klass (klass)) {
11166 MonoInst *offset_ins;
11168 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11169 /* The value is offset by 1 */
11170 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11171 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11173 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11175 ins->klass = mono_class_from_mono_type (field->type);
11176 ins->type = STACK_MP;
11181 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11183 if (mini_is_gsharedvt_klass (klass)) {
11184 MonoInst *offset_ins;
11186 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11187 /* The value is offset by 1 */
11188 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11189 dreg = alloc_ireg_mp (cfg);
11190 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11191 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11193 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11195 load->flags |= ins_flag;
11196 if (sp [0]->opcode != OP_LDADDR)
11197 load->flags |= MONO_INST_FAULT;
11209 context_used = mini_class_check_context_used (cfg, klass);
11211 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11214 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11215 * to be called here.
11217 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11218 mono_class_vtable (cfg->domain, klass);
11219 CHECK_TYPELOAD (klass);
11221 mono_domain_lock (cfg->domain);
11222 if (cfg->domain->special_static_fields)
11223 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11224 mono_domain_unlock (cfg->domain);
11226 is_special_static = mono_class_field_is_special_static (field);
11228 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11229 thread_ins = mono_get_thread_intrinsic (cfg);
11233 /* Generate IR to compute the field address */
11234 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11236 * Fast access to TLS data
11237 * Inline version of get_thread_static_data () in
11241 int idx, static_data_reg, array_reg, dreg;
11243 GSHAREDVT_FAILURE (op);
11245 MONO_ADD_INS (cfg->cbb, thread_ins);
11246 static_data_reg = alloc_ireg (cfg);
11247 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11249 if (cfg->compile_aot) {
11250 int offset_reg, offset2_reg, idx_reg;
11252 /* For TLS variables, this will return the TLS offset */
11253 EMIT_NEW_SFLDACONST (cfg, ins, field);
11254 offset_reg = ins->dreg;
11255 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11256 idx_reg = alloc_ireg (cfg);
11257 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11258 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11259 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11260 array_reg = alloc_ireg (cfg);
11261 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11262 offset2_reg = alloc_ireg (cfg);
11263 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11265 dreg = alloc_ireg (cfg);
11266 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11268 offset = (gsize)addr & 0x7fffffff;
11269 idx = offset & 0x3f;
11271 array_reg = alloc_ireg (cfg);
11272 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11273 dreg = alloc_ireg (cfg);
11274 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11276 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11277 (cfg->compile_aot && is_special_static) ||
11278 (context_used && is_special_static)) {
11279 MonoInst *iargs [2];
11281 g_assert (field->parent);
11282 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11283 if (context_used) {
11284 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11285 field, MONO_RGCTX_INFO_CLASS_FIELD);
11287 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11289 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11290 } else if (context_used) {
11291 MonoInst *static_data;
11294 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11295 method->klass->name_space, method->klass->name, method->name,
11296 depth, field->offset);
11299 if (mono_class_needs_cctor_run (klass, method))
11300 emit_class_init (cfg, klass);
11303 * The pointer we're computing here is
11305 * super_info.static_data + field->offset
11307 static_data = emit_get_rgctx_klass (cfg, context_used,
11308 klass, MONO_RGCTX_INFO_STATIC_DATA);
11310 if (mini_is_gsharedvt_klass (klass)) {
11311 MonoInst *offset_ins;
11313 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11314 /* The value is offset by 1 */
11315 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PSUB_IMM, offset_ins->dreg, offset_ins->dreg, 1);
11316 dreg = alloc_ireg_mp (cfg);
11317 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11318 } else if (field->offset == 0) {
11321 int addr_reg = mono_alloc_preg (cfg);
11322 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11324 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11325 MonoInst *iargs [2];
11327 g_assert (field->parent);
11328 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11329 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11330 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11332 MonoVTable *vtable = NULL;
11334 if (!cfg->compile_aot)
11335 vtable = mono_class_vtable (cfg->domain, klass);
11336 CHECK_TYPELOAD (klass);
11339 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11340 if (!(g_slist_find (class_inits, klass))) {
11341 emit_class_init (cfg, klass);
11342 if (cfg->verbose_level > 2)
11343 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11344 class_inits = g_slist_prepend (class_inits, klass);
11347 if (cfg->run_cctors) {
11349 /* This makes so that inline cannot trigger */
11350 /* .cctors: too many apps depend on them */
11351 /* running with a specific order... */
11353 if (! vtable->initialized)
11354 INLINE_FAILURE ("class init");
11355 ex = mono_runtime_class_init_full (vtable, FALSE);
11357 set_exception_object (cfg, ex);
11358 goto exception_exit;
11362 if (cfg->compile_aot)
11363 EMIT_NEW_SFLDACONST (cfg, ins, field);
11366 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11368 EMIT_NEW_PCONST (cfg, ins, addr);
11371 MonoInst *iargs [1];
11372 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11373 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11377 /* Generate IR to do the actual load/store operation */
11379 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11380 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11381 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11384 if (op == CEE_LDSFLDA) {
11385 ins->klass = mono_class_from_mono_type (ftype);
11386 ins->type = STACK_PTR;
11388 } else if (op == CEE_STSFLD) {
11391 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11392 store->flags |= ins_flag;
11394 gboolean is_const = FALSE;
11395 MonoVTable *vtable = NULL;
11396 gpointer addr = NULL;
11398 if (!context_used) {
11399 vtable = mono_class_vtable (cfg->domain, klass);
11400 CHECK_TYPELOAD (klass);
11402 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11403 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11404 int ro_type = ftype->type;
11406 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11407 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11408 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11411 GSHAREDVT_FAILURE (op);
11413 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11416 case MONO_TYPE_BOOLEAN:
11418 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11422 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11425 case MONO_TYPE_CHAR:
11427 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11431 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11436 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11440 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11445 case MONO_TYPE_PTR:
11446 case MONO_TYPE_FNPTR:
11447 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11448 type_to_eval_stack_type ((cfg), field->type, *sp);
11451 case MONO_TYPE_STRING:
11452 case MONO_TYPE_OBJECT:
11453 case MONO_TYPE_CLASS:
11454 case MONO_TYPE_SZARRAY:
11455 case MONO_TYPE_ARRAY:
11456 if (!mono_gc_is_moving ()) {
11457 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11458 type_to_eval_stack_type ((cfg), field->type, *sp);
11466 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11471 case MONO_TYPE_VALUETYPE:
11481 CHECK_STACK_OVF (1);
11483 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11484 load->flags |= ins_flag;
11490 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11491 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11492 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11503 token = read32 (ip + 1);
11504 klass = mini_get_class (method, token, generic_context);
11505 CHECK_TYPELOAD (klass);
11506 if (ins_flag & MONO_INST_VOLATILE) {
11507 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11508 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11510 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11511 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11512 ins->flags |= ins_flag;
11513 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11514 generic_class_is_reference_type (cfg, klass)) {
11515 /* insert call to write barrier */
11516 emit_write_barrier (cfg, sp [0], sp [1]);
11528 const char *data_ptr;
11530 guint32 field_token;
11536 token = read32 (ip + 1);
11538 klass = mini_get_class (method, token, generic_context);
11539 CHECK_TYPELOAD (klass);
11541 context_used = mini_class_check_context_used (cfg, klass);
11543 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11544 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11545 ins->sreg1 = sp [0]->dreg;
11546 ins->type = STACK_I4;
11547 ins->dreg = alloc_ireg (cfg);
11548 MONO_ADD_INS (cfg->cbb, ins);
11549 *sp = mono_decompose_opcode (cfg, ins);
11552 if (context_used) {
11553 MonoInst *args [3];
11554 MonoClass *array_class = mono_array_class_get (klass, 1);
11555 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11557 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11560 args [0] = emit_get_rgctx_klass (cfg, context_used,
11561 array_class, MONO_RGCTX_INFO_VTABLE);
11566 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11568 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11570 if (cfg->opt & MONO_OPT_SHARED) {
11571 /* Decompose now to avoid problems with references to the domainvar */
11572 MonoInst *iargs [3];
11574 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11575 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11576 iargs [2] = sp [0];
11578 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11580 /* Decompose later since it is needed by abcrem */
11581 MonoClass *array_type = mono_array_class_get (klass, 1);
11582 mono_class_vtable (cfg->domain, array_type);
11583 CHECK_TYPELOAD (array_type);
11585 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11586 ins->dreg = alloc_ireg_ref (cfg);
11587 ins->sreg1 = sp [0]->dreg;
11588 ins->inst_newa_class = klass;
11589 ins->type = STACK_OBJ;
11590 ins->klass = array_type;
11591 MONO_ADD_INS (cfg->cbb, ins);
11592 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11593 cfg->cbb->has_array_access = TRUE;
11595 /* Needed so mono_emit_load_got_addr () gets called */
11596 mono_get_got_var (cfg);
11606 * we inline/optimize the initialization sequence if possible.
11607 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11608 * for small sizes open code the memcpy
11609 * ensure the rva field is big enough
11611 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11612 MonoMethod *memcpy_method = get_memcpy_method ();
11613 MonoInst *iargs [3];
11614 int add_reg = alloc_ireg_mp (cfg);
11616 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11617 if (cfg->compile_aot) {
11618 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11620 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11622 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11623 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11632 if (sp [0]->type != STACK_OBJ)
11635 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11636 ins->dreg = alloc_preg (cfg);
11637 ins->sreg1 = sp [0]->dreg;
11638 ins->type = STACK_I4;
11639 /* This flag will be inherited by the decomposition */
11640 ins->flags |= MONO_INST_FAULT;
11641 MONO_ADD_INS (cfg->cbb, ins);
11642 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11643 cfg->cbb->has_array_access = TRUE;
11651 if (sp [0]->type != STACK_OBJ)
11654 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11656 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11657 CHECK_TYPELOAD (klass);
11658 /* we need to make sure that this array is exactly the type it needs
11659 * to be for correctness. the wrappers are lax with their usage
11660 * so we need to ignore them here
11662 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11663 MonoClass *array_class = mono_array_class_get (klass, 1);
11664 mini_emit_check_array_type (cfg, sp [0], array_class);
11665 CHECK_TYPELOAD (array_class);
11669 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11674 case CEE_LDELEM_I1:
11675 case CEE_LDELEM_U1:
11676 case CEE_LDELEM_I2:
11677 case CEE_LDELEM_U2:
11678 case CEE_LDELEM_I4:
11679 case CEE_LDELEM_U4:
11680 case CEE_LDELEM_I8:
11682 case CEE_LDELEM_R4:
11683 case CEE_LDELEM_R8:
11684 case CEE_LDELEM_REF: {
11690 if (*ip == CEE_LDELEM) {
11692 token = read32 (ip + 1);
11693 klass = mini_get_class (method, token, generic_context);
11694 CHECK_TYPELOAD (klass);
11695 mono_class_init (klass);
11698 klass = array_access_to_klass (*ip);
11700 if (sp [0]->type != STACK_OBJ)
11703 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11705 if (mini_is_gsharedvt_variable_klass (klass)) {
11706 // FIXME-VT: OP_ICONST optimization
11707 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11708 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11709 ins->opcode = OP_LOADV_MEMBASE;
11710 } else if (sp [1]->opcode == OP_ICONST) {
11711 int array_reg = sp [0]->dreg;
11712 int index_reg = sp [1]->dreg;
11713 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11715 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11716 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11718 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11719 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11722 if (*ip == CEE_LDELEM)
11729 case CEE_STELEM_I1:
11730 case CEE_STELEM_I2:
11731 case CEE_STELEM_I4:
11732 case CEE_STELEM_I8:
11733 case CEE_STELEM_R4:
11734 case CEE_STELEM_R8:
11735 case CEE_STELEM_REF:
11740 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11742 if (*ip == CEE_STELEM) {
11744 token = read32 (ip + 1);
11745 klass = mini_get_class (method, token, generic_context);
11746 CHECK_TYPELOAD (klass);
11747 mono_class_init (klass);
11750 klass = array_access_to_klass (*ip);
11752 if (sp [0]->type != STACK_OBJ)
11755 emit_array_store (cfg, klass, sp, TRUE);
11757 if (*ip == CEE_STELEM)
11764 case CEE_CKFINITE: {
11768 if (cfg->llvm_only) {
11769 MonoInst *iargs [1];
11771 iargs [0] = sp [0];
11772 *sp++ = mono_emit_jit_icall (cfg, mono_ckfinite, iargs);
11774 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11775 ins->sreg1 = sp [0]->dreg;
11776 ins->dreg = alloc_freg (cfg);
11777 ins->type = STACK_R8;
11778 MONO_ADD_INS (cfg->cbb, ins);
11780 *sp++ = mono_decompose_opcode (cfg, ins);
11786 case CEE_REFANYVAL: {
11787 MonoInst *src_var, *src;
11789 int klass_reg = alloc_preg (cfg);
11790 int dreg = alloc_preg (cfg);
11792 GSHAREDVT_FAILURE (*ip);
11795 MONO_INST_NEW (cfg, ins, *ip);
11798 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11799 CHECK_TYPELOAD (klass);
11801 context_used = mini_class_check_context_used (cfg, klass);
11804 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11806 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11807 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11808 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11810 if (context_used) {
11811 MonoInst *klass_ins;
11813 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11814 klass, MONO_RGCTX_INFO_KLASS);
11817 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11818 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11820 mini_emit_class_check (cfg, klass_reg, klass);
11822 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11823 ins->type = STACK_MP;
11824 ins->klass = klass;
11829 case CEE_MKREFANY: {
11830 MonoInst *loc, *addr;
11832 GSHAREDVT_FAILURE (*ip);
11835 MONO_INST_NEW (cfg, ins, *ip);
11838 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11839 CHECK_TYPELOAD (klass);
11841 context_used = mini_class_check_context_used (cfg, klass);
11843 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11844 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11846 if (context_used) {
11847 MonoInst *const_ins;
11848 int type_reg = alloc_preg (cfg);
11850 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11854 } else if (cfg->compile_aot) {
11855 int const_reg = alloc_preg (cfg);
11856 int type_reg = alloc_preg (cfg);
11858 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11859 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11860 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11863 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11864 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11868 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11869 ins->type = STACK_VTYPE;
11870 ins->klass = mono_defaults.typed_reference_class;
11875 case CEE_LDTOKEN: {
11877 MonoClass *handle_class;
11879 CHECK_STACK_OVF (1);
11882 n = read32 (ip + 1);
11884 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11885 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11886 handle = mono_method_get_wrapper_data (method, n);
11887 handle_class = mono_method_get_wrapper_data (method, n + 1);
11888 if (handle_class == mono_defaults.typehandle_class)
11889 handle = &((MonoClass*)handle)->byval_arg;
11892 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11897 mono_class_init (handle_class);
11898 if (cfg->gshared) {
11899 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11900 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11901 /* This case handles ldtoken
11902 of an open type, like for
11905 } else if (handle_class == mono_defaults.typehandle_class) {
11906 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11907 } else if (handle_class == mono_defaults.fieldhandle_class)
11908 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11909 else if (handle_class == mono_defaults.methodhandle_class)
11910 context_used = mini_method_check_context_used (cfg, handle);
11912 g_assert_not_reached ();
11915 if ((cfg->opt & MONO_OPT_SHARED) &&
11916 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11917 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11918 MonoInst *addr, *vtvar, *iargs [3];
11919 int method_context_used;
11921 method_context_used = mini_method_check_context_used (cfg, method);
11923 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11925 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11926 EMIT_NEW_ICONST (cfg, iargs [1], n);
11927 if (method_context_used) {
11928 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11929 method, MONO_RGCTX_INFO_METHOD);
11930 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11932 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11933 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11935 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11939 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11941 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11942 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11943 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11944 (cmethod->klass == mono_defaults.systemtype_class) &&
11945 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11946 MonoClass *tclass = mono_class_from_mono_type (handle);
11948 mono_class_init (tclass);
11949 if (context_used) {
11950 ins = emit_get_rgctx_klass (cfg, context_used,
11951 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11952 } else if (cfg->compile_aot) {
11953 if (method->wrapper_type) {
11954 mono_error_init (&error); //got to do it since there are multiple conditionals below
11955 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11956 /* Special case for static synchronized wrappers */
11957 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11959 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11960 /* FIXME: n is not a normal token */
11962 EMIT_NEW_PCONST (cfg, ins, NULL);
11965 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11968 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11970 ins->type = STACK_OBJ;
11971 ins->klass = cmethod->klass;
11974 MonoInst *addr, *vtvar;
11976 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11978 if (context_used) {
11979 if (handle_class == mono_defaults.typehandle_class) {
11980 ins = emit_get_rgctx_klass (cfg, context_used,
11981 mono_class_from_mono_type (handle),
11982 MONO_RGCTX_INFO_TYPE);
11983 } else if (handle_class == mono_defaults.methodhandle_class) {
11984 ins = emit_get_rgctx_method (cfg, context_used,
11985 handle, MONO_RGCTX_INFO_METHOD);
11986 } else if (handle_class == mono_defaults.fieldhandle_class) {
11987 ins = emit_get_rgctx_field (cfg, context_used,
11988 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11990 g_assert_not_reached ();
11992 } else if (cfg->compile_aot) {
11993 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11995 EMIT_NEW_PCONST (cfg, ins, handle);
11997 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11998 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11999 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12009 MONO_INST_NEW (cfg, ins, OP_THROW);
12011 ins->sreg1 = sp [0]->dreg;
12013 cfg->cbb->out_of_line = TRUE;
12014 MONO_ADD_INS (cfg->cbb, ins);
12015 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12016 MONO_ADD_INS (cfg->cbb, ins);
12019 link_bblock (cfg, cfg->cbb, end_bblock);
12020 start_new_bblock = 1;
12021 /* This can complicate code generation for llvm since the return value might not be defined */
12022 if (COMPILE_LLVM (cfg))
12023 INLINE_FAILURE ("throw");
12025 case CEE_ENDFINALLY:
12026 /* mono_save_seq_point_info () depends on this */
12027 if (sp != stack_start)
12028 emit_seq_point (cfg, method, ip, FALSE, FALSE);
12029 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
12030 MONO_ADD_INS (cfg->cbb, ins);
12032 start_new_bblock = 1;
12035 * Control will leave the method so empty the stack, otherwise
12036 * the next basic block will start with a nonempty stack.
12038 while (sp != stack_start) {
12043 case CEE_LEAVE_S: {
12046 if (*ip == CEE_LEAVE) {
12048 target = ip + 5 + (gint32)read32(ip + 1);
12051 target = ip + 2 + (signed char)(ip [1]);
12054 /* empty the stack */
12055 while (sp != stack_start) {
12060 * If this leave statement is in a catch block, check for a
12061 * pending exception, and rethrow it if necessary.
12062 * We avoid doing this in runtime invoke wrappers, since those are called
12063 * by native code which expects the wrapper to catch all exceptions.
12065 for (i = 0; i < header->num_clauses; ++i) {
12066 MonoExceptionClause *clause = &header->clauses [i];
12069 * Use <= in the final comparison to handle clauses with multiple
12070 * leave statements, like in bug #78024.
12071 * The ordering of the exception clauses guarantees that we find the
12072 * innermost clause.
12074 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
12076 MonoBasicBlock *dont_throw;
12081 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
12084 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
12086 NEW_BBLOCK (cfg, dont_throw);
12089 * Currently, we always rethrow the abort exception, despite the
12090 * fact that this is not correct. See thread6.cs for an example.
12091 * But propagating the abort exception is more important than
12092 * getting the semantics right.
12094 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
12095 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
12096 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
12098 MONO_START_BB (cfg, dont_throw);
12103 cfg->cbb->try_end = (intptr_t)(ip - header->code);
12106 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
12108 MonoExceptionClause *clause;
12110 for (tmp = handlers; tmp; tmp = tmp->next) {
12111 clause = tmp->data;
12112 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12114 link_bblock (cfg, cfg->cbb, tblock);
12115 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12116 ins->inst_target_bb = tblock;
12117 ins->inst_eh_block = clause;
12118 MONO_ADD_INS (cfg->cbb, ins);
12119 cfg->cbb->has_call_handler = 1;
12120 if (COMPILE_LLVM (cfg)) {
12121 MonoBasicBlock *target_bb;
12124 * Link the finally bblock with the target, since it will
12125 * conceptually branch there.
12126 * FIXME: Have to link the bblock containing the endfinally.
12128 GET_BBLOCK (cfg, target_bb, target);
12129 link_bblock (cfg, tblock, target_bb);
12132 g_list_free (handlers);
12135 MONO_INST_NEW (cfg, ins, OP_BR);
12136 MONO_ADD_INS (cfg->cbb, ins);
12137 GET_BBLOCK (cfg, tblock, target);
12138 link_bblock (cfg, cfg->cbb, tblock);
12139 ins->inst_target_bb = tblock;
12141 start_new_bblock = 1;
12143 if (*ip == CEE_LEAVE)
12152 * Mono specific opcodes
12154 case MONO_CUSTOM_PREFIX: {
12156 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12160 case CEE_MONO_ICALL: {
12162 MonoJitICallInfo *info;
12164 token = read32 (ip + 2);
12165 func = mono_method_get_wrapper_data (method, token);
12166 info = mono_find_jit_icall_by_addr (func);
12168 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12171 CHECK_STACK (info->sig->param_count);
12172 sp -= info->sig->param_count;
12174 ins = mono_emit_jit_icall (cfg, info->func, sp);
12175 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12179 inline_costs += 10 * num_calls++;
12183 case CEE_MONO_LDPTR_CARD_TABLE:
12184 case CEE_MONO_LDPTR_NURSERY_START:
12185 case CEE_MONO_LDPTR_NURSERY_BITS:
12186 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12187 CHECK_STACK_OVF (1);
12190 case CEE_MONO_LDPTR_CARD_TABLE:
12191 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12193 case CEE_MONO_LDPTR_NURSERY_START:
12194 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12196 case CEE_MONO_LDPTR_NURSERY_BITS:
12197 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_GC_NURSERY_BITS, NULL);
12199 case CEE_MONO_LDPTR_INT_REQ_FLAG:
12200 ins = emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12206 inline_costs += 10 * num_calls++;
12209 case CEE_MONO_LDPTR: {
12212 CHECK_STACK_OVF (1);
12214 token = read32 (ip + 2);
12216 ptr = mono_method_get_wrapper_data (method, token);
12217 EMIT_NEW_PCONST (cfg, ins, ptr);
12220 inline_costs += 10 * num_calls++;
12221 /* Can't embed random pointers into AOT code */
12225 case CEE_MONO_JIT_ICALL_ADDR: {
12226 MonoJitICallInfo *callinfo;
12229 CHECK_STACK_OVF (1);
12231 token = read32 (ip + 2);
12233 ptr = mono_method_get_wrapper_data (method, token);
12234 callinfo = mono_find_jit_icall_by_addr (ptr);
12235 g_assert (callinfo);
12236 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12239 inline_costs += 10 * num_calls++;
12242 case CEE_MONO_ICALL_ADDR: {
12243 MonoMethod *cmethod;
12246 CHECK_STACK_OVF (1);
12248 token = read32 (ip + 2);
12250 cmethod = mono_method_get_wrapper_data (method, token);
12252 if (cfg->compile_aot) {
12253 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12255 ptr = mono_lookup_internal_call (cmethod);
12257 EMIT_NEW_PCONST (cfg, ins, ptr);
12263 case CEE_MONO_VTADDR: {
12264 MonoInst *src_var, *src;
12270 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12271 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12276 case CEE_MONO_NEWOBJ: {
12277 MonoInst *iargs [2];
12279 CHECK_STACK_OVF (1);
12281 token = read32 (ip + 2);
12282 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12283 mono_class_init (klass);
12284 NEW_DOMAINCONST (cfg, iargs [0]);
12285 MONO_ADD_INS (cfg->cbb, iargs [0]);
12286 NEW_CLASSCONST (cfg, iargs [1], klass);
12287 MONO_ADD_INS (cfg->cbb, iargs [1]);
12288 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12290 inline_costs += 10 * num_calls++;
12293 case CEE_MONO_OBJADDR:
12296 MONO_INST_NEW (cfg, ins, OP_MOVE);
12297 ins->dreg = alloc_ireg_mp (cfg);
12298 ins->sreg1 = sp [0]->dreg;
12299 ins->type = STACK_MP;
12300 MONO_ADD_INS (cfg->cbb, ins);
12304 case CEE_MONO_LDNATIVEOBJ:
12306 * Similar to LDOBJ, but instead load the unmanaged
12307 * representation of the vtype to the stack.
12312 token = read32 (ip + 2);
12313 klass = mono_method_get_wrapper_data (method, token);
12314 g_assert (klass->valuetype);
12315 mono_class_init (klass);
12318 MonoInst *src, *dest, *temp;
12321 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12322 temp->backend.is_pinvoke = 1;
12323 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12324 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12326 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12327 dest->type = STACK_VTYPE;
12328 dest->klass = klass;
12334 case CEE_MONO_RETOBJ: {
12336 * Same as RET, but return the native representation of a vtype
12339 g_assert (cfg->ret);
12340 g_assert (mono_method_signature (method)->pinvoke);
12345 token = read32 (ip + 2);
12346 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12348 if (!cfg->vret_addr) {
12349 g_assert (cfg->ret_var_is_local);
12351 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12353 EMIT_NEW_RETLOADA (cfg, ins);
12355 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12357 if (sp != stack_start)
12360 MONO_INST_NEW (cfg, ins, OP_BR);
12361 ins->inst_target_bb = end_bblock;
12362 MONO_ADD_INS (cfg->cbb, ins);
12363 link_bblock (cfg, cfg->cbb, end_bblock);
12364 start_new_bblock = 1;
12368 case CEE_MONO_CISINST:
12369 case CEE_MONO_CCASTCLASS: {
12374 token = read32 (ip + 2);
12375 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12376 if (ip [1] == CEE_MONO_CISINST)
12377 ins = handle_cisinst (cfg, klass, sp [0]);
12379 ins = handle_ccastclass (cfg, klass, sp [0]);
12384 case CEE_MONO_SAVE_LMF:
12385 case CEE_MONO_RESTORE_LMF:
12388 case CEE_MONO_CLASSCONST:
12389 CHECK_STACK_OVF (1);
12391 token = read32 (ip + 2);
12392 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12395 inline_costs += 10 * num_calls++;
12397 case CEE_MONO_NOT_TAKEN:
12398 cfg->cbb->out_of_line = TRUE;
12401 case CEE_MONO_TLS: {
12404 CHECK_STACK_OVF (1);
12406 key = (gint32)read32 (ip + 2);
12407 g_assert (key < TLS_KEY_NUM);
12409 ins = mono_create_tls_get (cfg, key);
12411 if (cfg->compile_aot) {
12413 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12414 ins->dreg = alloc_preg (cfg);
12415 ins->type = STACK_PTR;
12417 g_assert_not_reached ();
12420 ins->type = STACK_PTR;
12421 MONO_ADD_INS (cfg->cbb, ins);
12426 case CEE_MONO_DYN_CALL: {
12427 MonoCallInst *call;
12429 /* It would be easier to call a trampoline, but that would put an
12430 * extra frame on the stack, confusing exception handling. So
12431 * implement it inline using an opcode for now.
12434 if (!cfg->dyn_call_var) {
12435 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12436 /* prevent it from being register allocated */
12437 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12440 /* Has to use a call inst since it local regalloc expects it */
12441 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12442 ins = (MonoInst*)call;
12444 ins->sreg1 = sp [0]->dreg;
12445 ins->sreg2 = sp [1]->dreg;
12446 MONO_ADD_INS (cfg->cbb, ins);
12448 cfg->param_area = MAX (cfg->param_area, cfg->backend->dyn_call_param_area);
12451 inline_costs += 10 * num_calls++;
12455 case CEE_MONO_MEMORY_BARRIER: {
12457 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12461 case CEE_MONO_JIT_ATTACH: {
12462 MonoInst *args [16], *domain_ins;
12463 MonoInst *ad_ins, *jit_tls_ins;
12464 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12466 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12468 EMIT_NEW_PCONST (cfg, ins, NULL);
12469 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12471 ad_ins = mono_get_domain_intrinsic (cfg);
12472 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12474 if (cfg->backend->have_tls_get && ad_ins && jit_tls_ins) {
12475 NEW_BBLOCK (cfg, next_bb);
12476 NEW_BBLOCK (cfg, call_bb);
12478 if (cfg->compile_aot) {
12479 /* AOT code is only used in the root domain */
12480 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12482 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12484 MONO_ADD_INS (cfg->cbb, ad_ins);
12485 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12486 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12488 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12493 MONO_START_BB (cfg, call_bb);
12496 if (cfg->compile_aot) {
12497 /* AOT code is only used in the root domain */
12498 EMIT_NEW_PCONST (cfg, args [0], NULL);
12500 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12502 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12503 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12506 MONO_START_BB (cfg, next_bb);
12510 case CEE_MONO_JIT_DETACH: {
12511 MonoInst *args [16];
12513 /* Restore the original domain */
12514 dreg = alloc_ireg (cfg);
12515 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12516 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12521 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12527 case CEE_PREFIX1: {
12530 case CEE_ARGLIST: {
12531 /* somewhat similar to LDTOKEN */
12532 MonoInst *addr, *vtvar;
12533 CHECK_STACK_OVF (1);
12534 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12536 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12537 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12539 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12540 ins->type = STACK_VTYPE;
12541 ins->klass = mono_defaults.argumenthandle_class;
12551 MonoInst *cmp, *arg1, *arg2;
12559 * The following transforms:
12560 * CEE_CEQ into OP_CEQ
12561 * CEE_CGT into OP_CGT
12562 * CEE_CGT_UN into OP_CGT_UN
12563 * CEE_CLT into OP_CLT
12564 * CEE_CLT_UN into OP_CLT_UN
12566 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12568 MONO_INST_NEW (cfg, ins, cmp->opcode);
12569 cmp->sreg1 = arg1->dreg;
12570 cmp->sreg2 = arg2->dreg;
12571 type_from_op (cfg, cmp, arg1, arg2);
12573 add_widen_op (cfg, cmp, &arg1, &arg2);
12574 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12575 cmp->opcode = OP_LCOMPARE;
12576 else if (arg1->type == STACK_R4)
12577 cmp->opcode = OP_RCOMPARE;
12578 else if (arg1->type == STACK_R8)
12579 cmp->opcode = OP_FCOMPARE;
12581 cmp->opcode = OP_ICOMPARE;
12582 MONO_ADD_INS (cfg->cbb, cmp);
12583 ins->type = STACK_I4;
12584 ins->dreg = alloc_dreg (cfg, ins->type);
12585 type_from_op (cfg, ins, arg1, arg2);
12587 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12589 * The backends expect the fceq opcodes to do the
12592 ins->sreg1 = cmp->sreg1;
12593 ins->sreg2 = cmp->sreg2;
12596 MONO_ADD_INS (cfg->cbb, ins);
12602 MonoInst *argconst;
12603 MonoMethod *cil_method;
12605 CHECK_STACK_OVF (1);
12607 n = read32 (ip + 2);
12608 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12609 if (!cmethod || mono_loader_get_last_error ())
12611 mono_class_init (cmethod->klass);
12613 mono_save_token_info (cfg, image, n, cmethod);
12615 context_used = mini_method_check_context_used (cfg, cmethod);
12617 cil_method = cmethod;
12618 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12619 METHOD_ACCESS_FAILURE (method, cil_method);
12621 if (mono_security_core_clr_enabled ())
12622 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12625 * Optimize the common case of ldftn+delegate creation
12627 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12628 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12629 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12630 MonoInst *target_ins, *handle_ins;
12631 MonoMethod *invoke;
12632 int invoke_context_used;
12634 invoke = mono_get_delegate_invoke (ctor_method->klass);
12635 if (!invoke || !mono_method_signature (invoke))
12638 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12640 target_ins = sp [-1];
12642 if (mono_security_core_clr_enabled ())
12643 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12645 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12646 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12647 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12649 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12653 /* FIXME: SGEN support */
12654 if (invoke_context_used == 0) {
12656 if (cfg->verbose_level > 3)
12657 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12658 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12661 CHECK_CFG_EXCEPTION;
12671 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12672 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12676 inline_costs += 10 * num_calls++;
12679 case CEE_LDVIRTFTN: {
12680 MonoInst *args [2];
12684 n = read32 (ip + 2);
12685 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12686 if (!cmethod || mono_loader_get_last_error ())
12688 mono_class_init (cmethod->klass);
12690 context_used = mini_method_check_context_used (cfg, cmethod);
12692 if (mono_security_core_clr_enabled ())
12693 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12696 * Optimize the common case of ldvirtftn+delegate creation
12698 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12699 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12700 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12701 MonoInst *target_ins, *handle_ins;
12702 MonoMethod *invoke;
12703 int invoke_context_used;
12704 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12706 invoke = mono_get_delegate_invoke (ctor_method->klass);
12707 if (!invoke || !mono_method_signature (invoke))
12710 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12712 target_ins = sp [-1];
12714 if (mono_security_core_clr_enabled ())
12715 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12717 /* FIXME: SGEN support */
12718 if (invoke_context_used == 0 || cfg->llvm_only) {
12720 if (cfg->verbose_level > 3)
12721 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12722 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12725 CHECK_CFG_EXCEPTION;
12738 args [1] = emit_get_rgctx_method (cfg, context_used,
12739 cmethod, MONO_RGCTX_INFO_METHOD);
12742 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12744 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12747 inline_costs += 10 * num_calls++;
12751 CHECK_STACK_OVF (1);
12753 n = read16 (ip + 2);
12755 EMIT_NEW_ARGLOAD (cfg, ins, n);
12760 CHECK_STACK_OVF (1);
12762 n = read16 (ip + 2);
12764 NEW_ARGLOADA (cfg, ins, n);
12765 MONO_ADD_INS (cfg->cbb, ins);
12773 n = read16 (ip + 2);
12775 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12777 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12781 CHECK_STACK_OVF (1);
12783 n = read16 (ip + 2);
12785 EMIT_NEW_LOCLOAD (cfg, ins, n);
12790 unsigned char *tmp_ip;
12791 CHECK_STACK_OVF (1);
12793 n = read16 (ip + 2);
12796 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12802 EMIT_NEW_LOCLOADA (cfg, ins, n);
12811 n = read16 (ip + 2);
12813 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12815 emit_stloc_ir (cfg, sp, header, n);
12822 if (sp != stack_start)
12824 if (cfg->method != method)
12826 * Inlining this into a loop in a parent could lead to
12827 * stack overflows which is different behavior than the
12828 * non-inlined case, thus disable inlining in this case.
12830 INLINE_FAILURE("localloc");
12832 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12833 ins->dreg = alloc_preg (cfg);
12834 ins->sreg1 = sp [0]->dreg;
12835 ins->type = STACK_PTR;
12836 MONO_ADD_INS (cfg->cbb, ins);
12838 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12840 ins->flags |= MONO_INST_INIT;
12845 case CEE_ENDFILTER: {
12846 MonoExceptionClause *clause, *nearest;
12851 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12853 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12854 ins->sreg1 = (*sp)->dreg;
12855 MONO_ADD_INS (cfg->cbb, ins);
12856 start_new_bblock = 1;
12860 for (cc = 0; cc < header->num_clauses; ++cc) {
12861 clause = &header->clauses [cc];
12862 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12863 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12864 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12867 g_assert (nearest);
12868 if ((ip - header->code) != nearest->handler_offset)
12873 case CEE_UNALIGNED_:
12874 ins_flag |= MONO_INST_UNALIGNED;
12875 /* FIXME: record alignment? we can assume 1 for now */
12879 case CEE_VOLATILE_:
12880 ins_flag |= MONO_INST_VOLATILE;
12884 ins_flag |= MONO_INST_TAILCALL;
12885 cfg->flags |= MONO_CFG_HAS_TAIL;
12886 /* Can't inline tail calls at this time */
12887 inline_costs += 100000;
12894 token = read32 (ip + 2);
12895 klass = mini_get_class (method, token, generic_context);
12896 CHECK_TYPELOAD (klass);
12897 if (generic_class_is_reference_type (cfg, klass))
12898 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12900 mini_emit_initobj (cfg, *sp, NULL, klass);
12904 case CEE_CONSTRAINED_:
12906 token = read32 (ip + 2);
12907 constrained_class = mini_get_class (method, token, generic_context);
12908 CHECK_TYPELOAD (constrained_class);
12912 case CEE_INITBLK: {
12913 MonoInst *iargs [3];
12917 /* Skip optimized paths for volatile operations. */
12918 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12919 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12920 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12921 /* emit_memset only works when val == 0 */
12922 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12925 iargs [0] = sp [0];
12926 iargs [1] = sp [1];
12927 iargs [2] = sp [2];
12928 if (ip [1] == CEE_CPBLK) {
12930 * FIXME: It's unclear whether we should be emitting both the acquire
12931 * and release barriers for cpblk. It is technically both a load and
12932 * store operation, so it seems like that's the sensible thing to do.
12934 * FIXME: We emit full barriers on both sides of the operation for
12935 * simplicity. We should have a separate atomic memcpy method instead.
12937 MonoMethod *memcpy_method = get_memcpy_method ();
12939 if (ins_flag & MONO_INST_VOLATILE)
12940 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12942 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12943 call->flags |= ins_flag;
12945 if (ins_flag & MONO_INST_VOLATILE)
12946 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12948 MonoMethod *memset_method = get_memset_method ();
12949 if (ins_flag & MONO_INST_VOLATILE) {
12950 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12951 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12953 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12954 call->flags |= ins_flag;
12965 ins_flag |= MONO_INST_NOTYPECHECK;
12967 ins_flag |= MONO_INST_NORANGECHECK;
12968 /* we ignore the no-nullcheck for now since we
12969 * really do it explicitly only when doing callvirt->call
12973 case CEE_RETHROW: {
12975 int handler_offset = -1;
12977 for (i = 0; i < header->num_clauses; ++i) {
12978 MonoExceptionClause *clause = &header->clauses [i];
12979 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12980 handler_offset = clause->handler_offset;
12985 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12987 if (handler_offset == -1)
12990 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12991 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12992 ins->sreg1 = load->dreg;
12993 MONO_ADD_INS (cfg->cbb, ins);
12995 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12996 MONO_ADD_INS (cfg->cbb, ins);
12999 link_bblock (cfg, cfg->cbb, end_bblock);
13000 start_new_bblock = 1;
13008 CHECK_STACK_OVF (1);
13010 token = read32 (ip + 2);
13011 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
13012 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
13015 val = mono_type_size (type, &ialign);
13017 MonoClass *klass = mini_get_class (method, token, generic_context);
13018 CHECK_TYPELOAD (klass);
13020 val = mono_type_size (&klass->byval_arg, &ialign);
13022 if (mini_is_gsharedvt_klass (klass))
13023 GSHAREDVT_FAILURE (*ip);
13025 EMIT_NEW_ICONST (cfg, ins, val);
13030 case CEE_REFANYTYPE: {
13031 MonoInst *src_var, *src;
13033 GSHAREDVT_FAILURE (*ip);
13039 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
13041 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
13042 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
13043 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
13048 case CEE_READONLY_:
13061 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
13071 g_warning ("opcode 0x%02x not handled", *ip);
13075 if (start_new_bblock != 1)
13078 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
13079 if (cfg->cbb->next_bb) {
13080 /* This could already be set because of inlining, #693905 */
13081 MonoBasicBlock *bb = cfg->cbb;
13083 while (bb->next_bb)
13085 bb->next_bb = end_bblock;
13087 cfg->cbb->next_bb = end_bblock;
13090 if (cfg->method == method && cfg->domainvar) {
13092 MonoInst *get_domain;
13094 cfg->cbb = init_localsbb;
13096 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13097 MONO_ADD_INS (cfg->cbb, get_domain);
13099 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13101 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13102 MONO_ADD_INS (cfg->cbb, store);
13105 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13106 if (cfg->compile_aot)
13107 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13108 mono_get_got_var (cfg);
13111 if (cfg->method == method && cfg->got_var)
13112 mono_emit_load_got_addr (cfg);
13114 if (init_localsbb) {
13115 cfg->cbb = init_localsbb;
13117 for (i = 0; i < header->num_locals; ++i) {
13118 emit_init_local (cfg, i, header->locals [i], init_locals);
13122 if (cfg->init_ref_vars && cfg->method == method) {
13123 /* Emit initialization for ref vars */
13124 // FIXME: Avoid duplication initialization for IL locals.
13125 for (i = 0; i < cfg->num_varinfo; ++i) {
13126 MonoInst *ins = cfg->varinfo [i];
13128 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13129 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13133 if (cfg->lmf_var && cfg->method == method && !cfg->llvm_only) {
13134 cfg->cbb = init_localsbb;
13135 emit_push_lmf (cfg);
13138 cfg->cbb = init_localsbb;
13139 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13142 MonoBasicBlock *bb;
13145 * Make seq points at backward branch targets interruptable.
13147 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13148 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13149 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13152 /* Add a sequence point for method entry/exit events */
13153 if (seq_points && cfg->gen_sdb_seq_points) {
13154 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13155 MONO_ADD_INS (init_localsbb, ins);
13156 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13157 MONO_ADD_INS (cfg->bb_exit, ins);
13161 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13162 * the code they refer to was dead (#11880).
13164 if (sym_seq_points) {
13165 for (i = 0; i < header->code_size; ++i) {
13166 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13169 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13170 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13177 if (cfg->method == method) {
13178 MonoBasicBlock *bb;
13179 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13180 bb->region = mono_find_block_region (cfg, bb->real_offset);
13182 mono_create_spvar_for_region (cfg, bb->region);
13183 if (cfg->verbose_level > 2)
13184 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13188 if (inline_costs < 0) {
13191 /* Method is too large */
13192 mname = mono_method_full_name (method, TRUE);
13193 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13194 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13198 if ((cfg->verbose_level > 2) && (cfg->method == method))
13199 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13204 g_assert (!mono_error_ok (&cfg->error));
13208 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13212 set_exception_type_from_invalid_il (cfg, method, ip);
13216 g_slist_free (class_inits);
13217 mono_basic_block_free (original_bb);
13218 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13219 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13220 if (cfg->exception_type)
13223 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode to the OP_STORE*_MEMBASE_IMM
 * opcode of the same store width, so a register store whose source
 * value is a known constant can be rewritten as an immediate store.
 * NOTE(review): this listing is elided — the switch scaffolding and
 * default label between the numbered lines are not visible here.
 */
13227 store_membase_reg_to_store_membase_imm (int opcode)
13230 case OP_STORE_MEMBASE_REG:
13231 return OP_STORE_MEMBASE_IMM;
13232 case OP_STOREI1_MEMBASE_REG:
13233 return OP_STOREI1_MEMBASE_IMM;
13234 case OP_STOREI2_MEMBASE_REG:
13235 return OP_STOREI2_MEMBASE_IMM;
13236 case OP_STOREI4_MEMBASE_REG:
13237 return OP_STOREI4_MEMBASE_IMM;
13238 case OP_STOREI8_MEMBASE_REG:
13239 return OP_STOREI8_MEMBASE_IMM;
/* Any other store opcode reaching this conversion is a JIT bug. */
13241 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking a register operand to the variant taking an
 * immediate operand (OP_IADD -> OP_IADD_IMM etc.), used when one input
 * is a known constant. Covers 32/64-bit ALU and shift ops, compares,
 * membase stores, some x86/amd64-specific forms, OP_VOIDCALL_REG and
 * localloc.
 * NOTE(review): the listing is elided — the 'case' labels matching the
 * visible 'return' lines, and the fallback for opcodes without an
 * immediate form, are not shown here; presumably the fallback returns
 * a sentinel (-1) — confirm against the full source.
 */
13248 mono_op_to_op_imm (int opcode)
13252 return OP_IADD_IMM;
13254 return OP_ISUB_IMM;
13256 return OP_IDIV_IMM;
13258 return OP_IDIV_UN_IMM;
13260 return OP_IREM_IMM;
13262 return OP_IREM_UN_IMM;
13264 return OP_IMUL_IMM;
13266 return OP_IAND_IMM;
13270 return OP_IXOR_IMM;
13272 return OP_ISHL_IMM;
13274 return OP_ISHR_IMM;
13276 return OP_ISHR_UN_IMM;
/* 64-bit (long) ALU/shift forms. */
13279 return OP_LADD_IMM;
13281 return OP_LSUB_IMM;
13283 return OP_LAND_IMM;
13287 return OP_LXOR_IMM;
13289 return OP_LSHL_IMM;
13291 return OP_LSHR_IMM;
13293 return OP_LSHR_UN_IMM;
/* Long remainder by immediate only on 64-bit registers. */
13294 #if SIZEOF_REGISTER == 8
13296 return OP_LREM_IMM;
13300 return OP_COMPARE_IMM;
13302 return OP_ICOMPARE_IMM;
13304 return OP_LCOMPARE_IMM;
/* Immediate forms of the membase stores (same width mapping as
 * store_membase_reg_to_store_membase_imm ()). */
13306 case OP_STORE_MEMBASE_REG:
13307 return OP_STORE_MEMBASE_IMM;
13308 case OP_STOREI1_MEMBASE_REG:
13309 return OP_STOREI1_MEMBASE_IMM;
13310 case OP_STOREI2_MEMBASE_REG:
13311 return OP_STOREI2_MEMBASE_IMM;
13312 case OP_STOREI4_MEMBASE_REG:
13313 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific immediate forms. */
13315 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13317 return OP_X86_PUSH_IMM;
13318 case OP_X86_COMPARE_MEMBASE_REG:
13319 return OP_X86_COMPARE_MEMBASE_IMM;
13321 #if defined(TARGET_AMD64)
13322 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13323 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant register becomes a direct call. */
13325 case OP_VOIDCALL_REG:
13326 return OP_VOIDCALL;
13334 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the JIT
 * OP_LOAD*_MEMBASE opcode of the matching width/signedness.
 * CEE_LDIND_REF and the native-int load both map to the
 * pointer-sized OP_LOAD_MEMBASE.
 * NOTE(review): elided listing — most 'case' labels are not visible.
 */
13341 ldind_to_load_membase (int opcode)
13345 return OP_LOADI1_MEMBASE;
13347 return OP_LOADU1_MEMBASE;
13349 return OP_LOADI2_MEMBASE;
13351 return OP_LOADU2_MEMBASE;
13353 return OP_LOADI4_MEMBASE;
13355 return OP_LOADU4_MEMBASE;
13357 return OP_LOAD_MEMBASE;
13358 case CEE_LDIND_REF:
13359 return OP_LOAD_MEMBASE;
13361 return OP_LOADI8_MEMBASE;
13363 return OP_LOADR4_MEMBASE;
13365 return OP_LOADR8_MEMBASE;
/* Unknown LDIND variant: indicates a front-end bug. */
13367 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the JIT
 * OP_STORE*_MEMBASE_REG opcode of the matching width.
 * CEE_STIND_REF maps to the pointer-sized OP_STORE_MEMBASE_REG.
 * NOTE(review): elided listing — most 'case' labels are not visible.
 */
13374 stind_to_store_membase (int opcode)
13378 return OP_STOREI1_MEMBASE_REG;
13380 return OP_STOREI2_MEMBASE_REG;
13382 return OP_STOREI4_MEMBASE_REG;
13384 case CEE_STIND_REF:
13385 return OP_STORE_MEMBASE_REG;
13387 return OP_STOREI8_MEMBASE_REG;
13389 return OP_STORER4_MEMBASE_REG;
13391 return OP_STORER8_MEMBASE_REG;
/* Unknown STIND variant: indicates a front-end bug. */
13393 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + displacement) load to the
 * corresponding OP_LOAD*_MEM (absolute address) load, used when the
 * base register holds a known constant address. Only x86/amd64 support
 * the *_MEM forms, hence the TARGET guard.
 * NOTE(review): elided listing — the non-x86 fallback and the function
 * tail are not visible; presumably it returns -1 when no *_MEM form
 * exists — confirm against the full source.
 */
13400 mono_load_membase_to_load_mem (int opcode)
13402 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13403 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13405 case OP_LOAD_MEMBASE:
13406 return OP_LOAD_MEM;
13407 case OP_LOADU1_MEMBASE:
13408 return OP_LOADU1_MEM;
13409 case OP_LOADU2_MEMBASE:
13410 return OP_LOADU2_MEM;
13411 case OP_LOADI4_MEMBASE:
13412 return OP_LOADI4_MEM;
13413 case OP_LOADU4_MEMBASE:
13414 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist on 64-bit targets. */
13415 #if SIZEOF_REGISTER == 8
13416 case OP_LOADI8_MEMBASE:
13417 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When an ALU op's result is immediately stored to memory via
 * STORE_OPCODE, return the x86/amd64 read-modify-write opcode
 * (e.g. OP_X86_ADD_MEMBASE_REG) that folds the ALU op and the store
 * into one instruction operating directly on the memory destination.
 * Only pointer/int-width stores qualify (the leading if filters).
 * NOTE(review): elided listing — the 'case' labels matching the
 * visible 'return' lines and the "no match" fallback are not shown.
 */
13426 op_to_op_dest_membase (int store_opcode, int opcode)
13428 #if defined(TARGET_X86)
/* Only 32-bit/pointer-width stores can be folded on x86. */
13429 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13434 return OP_X86_ADD_MEMBASE_REG;
13436 return OP_X86_SUB_MEMBASE_REG;
13438 return OP_X86_AND_MEMBASE_REG;
13440 return OP_X86_OR_MEMBASE_REG;
13442 return OP_X86_XOR_MEMBASE_REG;
13445 return OP_X86_ADD_MEMBASE_IMM;
13448 return OP_X86_SUB_MEMBASE_IMM;
13451 return OP_X86_AND_MEMBASE_IMM;
13454 return OP_X86_OR_MEMBASE_IMM;
13457 return OP_X86_XOR_MEMBASE_IMM;
13463 #if defined(TARGET_AMD64)
/* amd64 additionally accepts 8-byte stores (OP_STOREI8_MEMBASE_REG). */
13464 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the OP_X86_* memory forms on amd64. */
13469 return OP_X86_ADD_MEMBASE_REG;
13471 return OP_X86_SUB_MEMBASE_REG;
13473 return OP_X86_AND_MEMBASE_REG;
13475 return OP_X86_OR_MEMBASE_REG;
13477 return OP_X86_XOR_MEMBASE_REG;
13479 return OP_X86_ADD_MEMBASE_IMM;
13481 return OP_X86_SUB_MEMBASE_IMM;
13483 return OP_X86_AND_MEMBASE_IMM;
13485 return OP_X86_OR_MEMBASE_IMM;
13487 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) ops use the OP_AMD64_* memory forms. */
13489 return OP_AMD64_ADD_MEMBASE_REG;
13491 return OP_AMD64_SUB_MEMBASE_REG;
13493 return OP_AMD64_AND_MEMBASE_REG;
13495 return OP_AMD64_OR_MEMBASE_REG;
13497 return OP_AMD64_XOR_MEMBASE_REG;
13500 return OP_AMD64_ADD_MEMBASE_IMM;
13503 return OP_AMD64_SUB_MEMBASE_IMM;
13506 return OP_AMD64_AND_MEMBASE_IMM;
13509 return OP_AMD64_OR_MEMBASE_IMM;
13512 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode whose boolean result is immediately
 * stored as a byte (OP_STOREI1_MEMBASE_REG) into a single x86/amd64
 * SETcc-to-memory opcode (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE).
 * NOTE(review): elided listing — the 'case' labels selecting between
 * the two visible branches and the fallback are not shown here.
 */
13522 op_to_op_store_membase (int store_opcode, int opcode)
13524 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13527 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13528 return OP_X86_SETEQ_MEMBASE;
13530 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13531 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When OPCODE's first source register is produced by a single-use
 * memory load (LOAD_OPCODE), return a membase form of OPCODE that
 * reads that operand directly from memory (push/compare forms on
 * x86/amd64), eliminating the intermediate load. On amd64 with ilp32
 * (x32), OP_LOAD_MEMBASE is 4 bytes wide, hence the ilp32 checks.
 * NOTE(review): elided listing — the TARGET_X86 guard, 'case' labels
 * and the "no match" fallback are not visible here.
 */
13539 op_to_op_src1_membase (MonoCompile *cfg, int load_opcode, int opcode)
13542 /* FIXME: This has sign extension issues */
/* Unsigned byte load + 32-bit compare-imm folds to a byte compare. */
13544 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13545 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only pointer/4-byte loads can be folded on x86. */
13548 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13553 return OP_X86_PUSH_MEMBASE;
13554 case OP_COMPARE_IMM:
13555 case OP_ICOMPARE_IMM:
13556 return OP_X86_COMPARE_MEMBASE_IMM;
13559 return OP_X86_COMPARE_MEMBASE_REG;
13563 #ifdef TARGET_AMD64
13564 /* FIXME: This has sign extension issues */
13566 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13567 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Pushes fold only 8-byte (or non-ilp32 pointer-width) loads. */
13572 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13573 return OP_X86_PUSH_MEMBASE;
13575 /* FIXME: This only works for 32 bit immediates
13576 case OP_COMPARE_IMM:
13577 case OP_LCOMPARE_IMM:
13578 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13579 return OP_AMD64_COMPARE_MEMBASE_IMM;
13581 case OP_ICOMPARE_IMM:
13582 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13583 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 4- vs 8-byte form by the load width. */
13587 if (cfg->backend->ilp32 && load_opcode == OP_LOAD_MEMBASE)
13588 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13589 if ((load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32) || (load_opcode == OP_LOADI8_MEMBASE))
13590 return OP_AMD64_COMPARE_MEMBASE_REG;
13593 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13594 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Counterpart of op_to_op_src1_membase () for the second source
 * register: fold a single-use memory load feeding sreg2 into a
 * reg-membase form of OPCODE (compare/add/sub/and/or/xor) on
 * x86/amd64. On amd64 the 4- vs 8-byte variant is chosen from the
 * load width, treating OP_LOAD_MEMBASE as 4 bytes under ilp32 (x32).
 * NOTE(review): elided listing — the TARGET_X86 guard, 'case' labels
 * and the "no match" fallback are not visible here.
 */
13603 op_to_op_src2_membase (MonoCompile *cfg, int load_opcode, int opcode)
/* x86: only pointer/4-byte loads can be folded. */
13606 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13612 return OP_X86_COMPARE_REG_MEMBASE;
13614 return OP_X86_ADD_REG_MEMBASE;
13616 return OP_X86_SUB_REG_MEMBASE;
13618 return OP_X86_AND_REG_MEMBASE;
13620 return OP_X86_OR_REG_MEMBASE;
13622 return OP_X86_XOR_REG_MEMBASE;
13626 #ifdef TARGET_AMD64
/* 4-byte operand: reuse the OP_X86_* reg-membase forms. */
13627 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && cfg->backend->ilp32)) {
13630 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13632 return OP_X86_ADD_REG_MEMBASE;
13634 return OP_X86_SUB_REG_MEMBASE;
13636 return OP_X86_AND_REG_MEMBASE;
13638 return OP_X86_OR_REG_MEMBASE;
13640 return OP_X86_XOR_REG_MEMBASE;
/* 8-byte operand: use the OP_AMD64_* reg-membase forms. */
13642 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE && !cfg->backend->ilp32)) {
13646 return OP_AMD64_COMPARE_REG_MEMBASE;
13648 return OP_AMD64_ADD_REG_MEMBASE;
13650 return OP_AMD64_SUB_REG_MEMBASE;
13652 return OP_AMD64_AND_REG_MEMBASE;
13654 return OP_AMD64_OR_REG_MEMBASE;
13656 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion for opcodes
 * whose register form is emulated in software on this target (long
 * shifts on 32-bit registers, mul/div/rem under the
 * MONO_ARCH_EMULATE_* defines), since the emulation helpers have no
 * immediate variants. Falls through to mono_op_to_op_imm () otherwise.
 * NOTE(review): heavily elided listing — the 'case' labels and the
 * early-exit returns guarded by the visible #if blocks are not shown;
 * presumably the guarded cases return -1 — confirm against the full
 * source.
 */
13665 mono_op_to_op_imm_noemul (int opcode)
13668 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13674 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13681 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13686 return mono_op_to_op_imm (opcode);
13691 * mono_handle_global_vregs:
13693 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13697 mono_handle_global_vregs (MonoCompile *cfg)
13699 gint32 *vreg_to_bb;
13700 MonoBasicBlock *bb;
13703 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13705 #ifdef MONO_ARCH_SIMD_INTRINSICS
13706 if (cfg->uses_simd_intrinsics)
13707 mono_simd_simplify_indirection (cfg);
13710 /* Find local vregs used in more than one bb */
13711 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13712 MonoInst *ins = bb->code;
13713 int block_num = bb->block_num;
13715 if (cfg->verbose_level > 2)
13716 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13719 for (; ins; ins = ins->next) {
13720 const char *spec = INS_INFO (ins->opcode);
13721 int regtype = 0, regindex;
13724 if (G_UNLIKELY (cfg->verbose_level > 2))
13725 mono_print_ins (ins);
13727 g_assert (ins->opcode >= MONO_CEE_LAST);
13729 for (regindex = 0; regindex < 4; regindex ++) {
13732 if (regindex == 0) {
13733 regtype = spec [MONO_INST_DEST];
13734 if (regtype == ' ')
13737 } else if (regindex == 1) {
13738 regtype = spec [MONO_INST_SRC1];
13739 if (regtype == ' ')
13742 } else if (regindex == 2) {
13743 regtype = spec [MONO_INST_SRC2];
13744 if (regtype == ' ')
13747 } else if (regindex == 3) {
13748 regtype = spec [MONO_INST_SRC3];
13749 if (regtype == ' ')
13754 #if SIZEOF_REGISTER == 4
13755 /* In the LLVM case, the long opcodes are not decomposed */
13756 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13758 * Since some instructions reference the original long vreg,
13759 * and some reference the two component vregs, it is quite hard
13760 * to determine when it needs to be global. So be conservative.
13762 if (!get_vreg_to_inst (cfg, vreg)) {
13763 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13765 if (cfg->verbose_level > 2)
13766 printf ("LONG VREG R%d made global.\n", vreg);
13770 * Make the component vregs volatile since the optimizations can
13771 * get confused otherwise.
13773 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13774 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13778 g_assert (vreg != -1);
13780 prev_bb = vreg_to_bb [vreg];
13781 if (prev_bb == 0) {
13782 /* 0 is a valid block num */
13783 vreg_to_bb [vreg] = block_num + 1;
13784 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13785 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13788 if (!get_vreg_to_inst (cfg, vreg)) {
13789 if (G_UNLIKELY (cfg->verbose_level > 2))
13790 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13794 if (vreg_is_ref (cfg, vreg))
13795 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13797 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13800 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13803 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13806 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13809 g_assert_not_reached ();
13813 /* Flag as having been used in more than one bb */
13814 vreg_to_bb [vreg] = -1;
13820 /* If a variable is used in only one bblock, convert it into a local vreg */
13821 for (i = 0; i < cfg->num_varinfo; i++) {
13822 MonoInst *var = cfg->varinfo [i];
13823 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13825 switch (var->type) {
13831 #if SIZEOF_REGISTER == 8
13834 #if !defined(TARGET_X86)
13835 /* Enabling this screws up the fp stack on x86 */
13838 if (mono_arch_is_soft_float ())
13841 /* Arguments are implicitly global */
13842 /* Putting R4 vars into registers doesn't work currently */
13843 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13844 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13846 * Make that the variable's liveness interval doesn't contain a call, since
13847 * that would cause the lvreg to be spilled, making the whole optimization
13850 /* This is too slow for JIT compilation */
13852 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13854 int def_index, call_index, ins_index;
13855 gboolean spilled = FALSE;
13860 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13861 const char *spec = INS_INFO (ins->opcode);
13863 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13864 def_index = ins_index;
13866 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13867 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13868 if (call_index > def_index) {
13874 if (MONO_IS_CALL (ins))
13875 call_index = ins_index;
13885 if (G_UNLIKELY (cfg->verbose_level > 2))
13886 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13887 var->flags |= MONO_INST_IS_DEAD;
13888 cfg->vreg_to_inst [var->dreg] = NULL;
13895 * Compress the varinfo and vars tables so the liveness computation is faster and
13896 * takes up less space.
13899 for (i = 0; i < cfg->num_varinfo; ++i) {
13900 MonoInst *var = cfg->varinfo [i];
13901 if (pos < i && cfg->locals_start == i)
13902 cfg->locals_start = pos;
13903 if (!(var->flags & MONO_INST_IS_DEAD)) {
13905 cfg->varinfo [pos] = cfg->varinfo [i];
13906 cfg->varinfo [pos]->inst_c0 = pos;
13907 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13908 cfg->vars [pos].idx = pos;
13909 #if SIZEOF_REGISTER == 4
13910 if (cfg->varinfo [pos]->type == STACK_I8) {
13911 /* Modify the two component vars too */
13914 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13915 var1->inst_c0 = pos;
13916 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13917 var1->inst_c0 = pos;
13924 cfg->num_varinfo = pos;
13925 if (cfg->locals_start > cfg->num_varinfo)
13926 cfg->locals_start = cfg->num_varinfo;
13930 * mono_spill_global_vars:
13932 * Generate spill code for variables which are not allocated to registers,
13933 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13934 * code is generated which could be optimized by the local optimization passes.
13937 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13939 MonoBasicBlock *bb;
13941 int orig_next_vreg;
13942 guint32 *vreg_to_lvreg;
13944 guint32 i, lvregs_len;
13945 gboolean dest_has_lvreg = FALSE;
13946 guint32 stacktypes [128];
13947 MonoInst **live_range_start, **live_range_end;
13948 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13949 int *gsharedvt_vreg_to_idx = NULL;
13951 *need_local_opts = FALSE;
13953 memset (spec2, 0, sizeof (spec2));
13955 /* FIXME: Move this function to mini.c */
13956 stacktypes ['i'] = STACK_PTR;
13957 stacktypes ['l'] = STACK_I8;
13958 stacktypes ['f'] = STACK_R8;
13959 #ifdef MONO_ARCH_SIMD_INTRINSICS
13960 stacktypes ['x'] = STACK_VTYPE;
13963 #if SIZEOF_REGISTER == 4
13964 /* Create MonoInsts for longs */
13965 for (i = 0; i < cfg->num_varinfo; i++) {
13966 MonoInst *ins = cfg->varinfo [i];
13968 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13969 switch (ins->type) {
13974 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13977 g_assert (ins->opcode == OP_REGOFFSET);
13979 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13981 tree->opcode = OP_REGOFFSET;
13982 tree->inst_basereg = ins->inst_basereg;
13983 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13985 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13987 tree->opcode = OP_REGOFFSET;
13988 tree->inst_basereg = ins->inst_basereg;
13989 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13999 if (cfg->compute_gc_maps) {
14000 /* registers need liveness info even for !non refs */
14001 for (i = 0; i < cfg->num_varinfo; i++) {
14002 MonoInst *ins = cfg->varinfo [i];
14004 if (ins->opcode == OP_REGVAR)
14005 ins->flags |= MONO_INST_GC_TRACK;
14009 if (cfg->gsharedvt) {
14010 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
14012 for (i = 0; i < cfg->num_varinfo; ++i) {
14013 MonoInst *ins = cfg->varinfo [i];
14016 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
14017 if (i >= cfg->locals_start) {
14019 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
14020 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
14021 ins->opcode = OP_GSHAREDVT_LOCAL;
14022 ins->inst_imm = idx;
14025 gsharedvt_vreg_to_idx [ins->dreg] = -1;
14026 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
14032 /* FIXME: widening and truncation */
14035 * As an optimization, when a variable allocated to the stack is first loaded into
14036 * an lvreg, we will remember the lvreg and use it the next time instead of loading
14037 * the variable again.
14039 orig_next_vreg = cfg->next_vreg;
14040 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
14041 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
14045 * These arrays contain the first and last instructions accessing a given
14047 * Since we emit bblocks in the same order we process them here, and we
14048 * don't split live ranges, these will precisely describe the live range of
14049 * the variable, i.e. the instruction range where a valid value can be found
14050 * in the variables location.
14051 * The live range is computed using the liveness info computed by the liveness pass.
14052 * We can't use vmv->range, since that is an abstract live range, and we need
14053 * one which is instruction precise.
14054 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
14056 /* FIXME: Only do this if debugging info is requested */
14057 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
14058 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
14059 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14060 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14062 /* Add spill loads/stores */
14063 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14066 if (cfg->verbose_level > 2)
14067 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14069 /* Clear vreg_to_lvreg array */
14070 for (i = 0; i < lvregs_len; i++)
14071 vreg_to_lvreg [lvregs [i]] = 0;
14075 MONO_BB_FOR_EACH_INS (bb, ins) {
14076 const char *spec = INS_INFO (ins->opcode);
14077 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14078 gboolean store, no_lvreg;
14079 int sregs [MONO_MAX_SRC_REGS];
14081 if (G_UNLIKELY (cfg->verbose_level > 2))
14082 mono_print_ins (ins);
14084 if (ins->opcode == OP_NOP)
14088 * We handle LDADDR here as well, since it can only be decomposed
14089 * when variable addresses are known.
14091 if (ins->opcode == OP_LDADDR) {
14092 MonoInst *var = ins->inst_p0;
14094 if (var->opcode == OP_VTARG_ADDR) {
14095 /* Happens on SPARC/S390 where vtypes are passed by reference */
14096 MonoInst *vtaddr = var->inst_left;
14097 if (vtaddr->opcode == OP_REGVAR) {
14098 ins->opcode = OP_MOVE;
14099 ins->sreg1 = vtaddr->dreg;
14101 else if (var->inst_left->opcode == OP_REGOFFSET) {
14102 ins->opcode = OP_LOAD_MEMBASE;
14103 ins->inst_basereg = vtaddr->inst_basereg;
14104 ins->inst_offset = vtaddr->inst_offset;
14107 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14108 /* gsharedvt arg passed by ref */
14109 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14111 ins->opcode = OP_LOAD_MEMBASE;
14112 ins->inst_basereg = var->inst_basereg;
14113 ins->inst_offset = var->inst_offset;
14114 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14115 MonoInst *load, *load2, *load3;
14116 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14117 int reg1, reg2, reg3;
14118 MonoInst *info_var = cfg->gsharedvt_info_var;
14119 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14123 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14126 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14128 g_assert (info_var);
14129 g_assert (locals_var);
14131 /* Mark the instruction used to compute the locals var as used */
14132 cfg->gsharedvt_locals_var_ins = NULL;
14134 /* Load the offset */
14135 if (info_var->opcode == OP_REGOFFSET) {
14136 reg1 = alloc_ireg (cfg);
14137 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14138 } else if (info_var->opcode == OP_REGVAR) {
14140 reg1 = info_var->dreg;
14142 g_assert_not_reached ();
14144 reg2 = alloc_ireg (cfg);
14145 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14146 /* Load the locals area address */
14147 reg3 = alloc_ireg (cfg);
14148 if (locals_var->opcode == OP_REGOFFSET) {
14149 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14150 } else if (locals_var->opcode == OP_REGVAR) {
14151 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14153 g_assert_not_reached ();
14155 /* Compute the address */
14156 ins->opcode = OP_PADD;
14160 mono_bblock_insert_before_ins (bb, ins, load3);
14161 mono_bblock_insert_before_ins (bb, load3, load2);
14163 mono_bblock_insert_before_ins (bb, load2, load);
14165 g_assert (var->opcode == OP_REGOFFSET);
14167 ins->opcode = OP_ADD_IMM;
14168 ins->sreg1 = var->inst_basereg;
14169 ins->inst_imm = var->inst_offset;
14172 *need_local_opts = TRUE;
14173 spec = INS_INFO (ins->opcode);
14176 if (ins->opcode < MONO_CEE_LAST) {
14177 mono_print_ins (ins);
14178 g_assert_not_reached ();
14182 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14186 if (MONO_IS_STORE_MEMBASE (ins)) {
14187 tmp_reg = ins->dreg;
14188 ins->dreg = ins->sreg2;
14189 ins->sreg2 = tmp_reg;
14192 spec2 [MONO_INST_DEST] = ' ';
14193 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14194 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14195 spec2 [MONO_INST_SRC3] = ' ';
14197 } else if (MONO_IS_STORE_MEMINDEX (ins))
14198 g_assert_not_reached ();
14203 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14204 printf ("\t %.3s %d", spec, ins->dreg);
14205 num_sregs = mono_inst_get_src_registers (ins, sregs);
14206 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14207 printf (" %d", sregs [srcindex]);
14214 regtype = spec [MONO_INST_DEST];
14215 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14218 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14219 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14220 MonoInst *store_ins;
14222 MonoInst *def_ins = ins;
14223 int dreg = ins->dreg; /* The original vreg */
14225 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14227 if (var->opcode == OP_REGVAR) {
14228 ins->dreg = var->dreg;
14229 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14231 * Instead of emitting a load+store, use a _membase opcode.
14233 g_assert (var->opcode == OP_REGOFFSET);
14234 if (ins->opcode == OP_MOVE) {
14238 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14239 ins->inst_basereg = var->inst_basereg;
14240 ins->inst_offset = var->inst_offset;
14243 spec = INS_INFO (ins->opcode);
14247 g_assert (var->opcode == OP_REGOFFSET);
14249 prev_dreg = ins->dreg;
14251 /* Invalidate any previous lvreg for this vreg */
14252 vreg_to_lvreg [ins->dreg] = 0;
14256 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14258 store_opcode = OP_STOREI8_MEMBASE_REG;
14261 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14263 #if SIZEOF_REGISTER != 8
14264 if (regtype == 'l') {
14265 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14266 mono_bblock_insert_after_ins (bb, ins, store_ins);
14267 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14268 mono_bblock_insert_after_ins (bb, ins, store_ins);
14269 def_ins = store_ins;
14274 g_assert (store_opcode != OP_STOREV_MEMBASE);
14276 /* Try to fuse the store into the instruction itself */
14277 /* FIXME: Add more instructions */
14278 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14279 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14280 ins->inst_imm = ins->inst_c0;
14281 ins->inst_destbasereg = var->inst_basereg;
14282 ins->inst_offset = var->inst_offset;
14283 spec = INS_INFO (ins->opcode);
14284 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14285 ins->opcode = store_opcode;
14286 ins->inst_destbasereg = var->inst_basereg;
14287 ins->inst_offset = var->inst_offset;
14291 tmp_reg = ins->dreg;
14292 ins->dreg = ins->sreg2;
14293 ins->sreg2 = tmp_reg;
14296 spec2 [MONO_INST_DEST] = ' ';
14297 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14298 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14299 spec2 [MONO_INST_SRC3] = ' ';
14301 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14302 // FIXME: The backends expect the base reg to be in inst_basereg
14303 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14305 ins->inst_basereg = var->inst_basereg;
14306 ins->inst_offset = var->inst_offset;
14307 spec = INS_INFO (ins->opcode);
14309 /* printf ("INS: "); mono_print_ins (ins); */
14310 /* Create a store instruction */
14311 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14313 /* Insert it after the instruction */
14314 mono_bblock_insert_after_ins (bb, ins, store_ins);
14316 def_ins = store_ins;
14319 * We can't assign ins->dreg to var->dreg here, since the
14320 * sregs could use it. So set a flag, and do it after
14323 if ((!cfg->backend->use_fpstack || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14324 dest_has_lvreg = TRUE;
14329 if (def_ins && !live_range_start [dreg]) {
14330 live_range_start [dreg] = def_ins;
14331 live_range_start_bb [dreg] = bb;
14334 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14337 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14338 tmp->inst_c1 = dreg;
14339 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14346 num_sregs = mono_inst_get_src_registers (ins, sregs);
14347 for (srcindex = 0; srcindex < 3; ++srcindex) {
14348 regtype = spec [MONO_INST_SRC1 + srcindex];
14349 sreg = sregs [srcindex];
14351 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14352 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14353 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14354 MonoInst *use_ins = ins;
14355 MonoInst *load_ins;
14356 guint32 load_opcode;
14358 if (var->opcode == OP_REGVAR) {
14359 sregs [srcindex] = var->dreg;
14360 //mono_inst_set_src_registers (ins, sregs);
14361 live_range_end [sreg] = use_ins;
14362 live_range_end_bb [sreg] = bb;
14364 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14367 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14368 /* var->dreg is a hreg */
14369 tmp->inst_c1 = sreg;
14370 mono_bblock_insert_after_ins (bb, ins, tmp);
14376 g_assert (var->opcode == OP_REGOFFSET);
14378 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14380 g_assert (load_opcode != OP_LOADV_MEMBASE);
14382 if (vreg_to_lvreg [sreg]) {
14383 g_assert (vreg_to_lvreg [sreg] != -1);
14385 /* The variable is already loaded to an lvreg */
14386 if (G_UNLIKELY (cfg->verbose_level > 2))
14387 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14388 sregs [srcindex] = vreg_to_lvreg [sreg];
14389 //mono_inst_set_src_registers (ins, sregs);
14393 /* Try to fuse the load into the instruction */
14394 if ((srcindex == 0) && (op_to_op_src1_membase (cfg, load_opcode, ins->opcode) != -1)) {
14395 ins->opcode = op_to_op_src1_membase (cfg, load_opcode, ins->opcode);
14396 sregs [0] = var->inst_basereg;
14397 //mono_inst_set_src_registers (ins, sregs);
14398 ins->inst_offset = var->inst_offset;
14399 } else if ((srcindex == 1) && (op_to_op_src2_membase (cfg, load_opcode, ins->opcode) != -1)) {
14400 ins->opcode = op_to_op_src2_membase (cfg, load_opcode, ins->opcode);
14401 sregs [1] = var->inst_basereg;
14402 //mono_inst_set_src_registers (ins, sregs);
14403 ins->inst_offset = var->inst_offset;
14405 if (MONO_IS_REAL_MOVE (ins)) {
14406 ins->opcode = OP_NOP;
14409 //printf ("%d ", srcindex); mono_print_ins (ins);
14411 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14413 if ((!cfg->backend->use_fpstack || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14414 if (var->dreg == prev_dreg) {
14416 * sreg refers to the value loaded by the load
14417 * emitted below, but we need to use ins->dreg
14418 * since it refers to the store emitted earlier.
14422 g_assert (sreg != -1);
14423 vreg_to_lvreg [var->dreg] = sreg;
14424 g_assert (lvregs_len < 1024);
14425 lvregs [lvregs_len ++] = var->dreg;
14429 sregs [srcindex] = sreg;
14430 //mono_inst_set_src_registers (ins, sregs);
14432 #if SIZEOF_REGISTER != 8
14433 if (regtype == 'l') {
14434 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14435 mono_bblock_insert_before_ins (bb, ins, load_ins);
14436 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14437 mono_bblock_insert_before_ins (bb, ins, load_ins);
14438 use_ins = load_ins;
14443 #if SIZEOF_REGISTER == 4
14444 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14446 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14447 mono_bblock_insert_before_ins (bb, ins, load_ins);
14448 use_ins = load_ins;
14452 if (var->dreg < orig_next_vreg) {
14453 live_range_end [var->dreg] = use_ins;
14454 live_range_end_bb [var->dreg] = bb;
14457 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14460 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14461 tmp->inst_c1 = var->dreg;
14462 mono_bblock_insert_after_ins (bb, ins, tmp);
14466 mono_inst_set_src_registers (ins, sregs);
14468 if (dest_has_lvreg) {
14469 g_assert (ins->dreg != -1);
14470 vreg_to_lvreg [prev_dreg] = ins->dreg;
14471 g_assert (lvregs_len < 1024);
14472 lvregs [lvregs_len ++] = prev_dreg;
14473 dest_has_lvreg = FALSE;
14477 tmp_reg = ins->dreg;
14478 ins->dreg = ins->sreg2;
14479 ins->sreg2 = tmp_reg;
14482 if (MONO_IS_CALL (ins)) {
14483 /* Clear vreg_to_lvreg array */
14484 for (i = 0; i < lvregs_len; i++)
14485 vreg_to_lvreg [lvregs [i]] = 0;
14487 } else if (ins->opcode == OP_NOP) {
14489 MONO_INST_NULLIFY_SREGS (ins);
14492 if (cfg->verbose_level > 2)
14493 mono_print_ins_index (1, ins);
14496 /* Extend the live range based on the liveness info */
14497 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14498 for (i = 0; i < cfg->num_varinfo; i ++) {
14499 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14501 if (vreg_is_volatile (cfg, vi->vreg))
14502 /* The liveness info is incomplete */
14505 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14506 /* Live from at least the first ins of this bb */
14507 live_range_start [vi->vreg] = bb->code;
14508 live_range_start_bb [vi->vreg] = bb;
14511 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14512 /* Live at least until the last ins of this bb */
14513 live_range_end [vi->vreg] = bb->last_ins;
14514 live_range_end_bb [vi->vreg] = bb;
14521 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14522 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14524 if (cfg->backend->have_liverange_ops && cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14525 for (i = 0; i < cfg->num_varinfo; ++i) {
14526 int vreg = MONO_VARINFO (cfg, i)->vreg;
14529 if (live_range_start [vreg]) {
14530 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14532 ins->inst_c1 = vreg;
14533 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14535 if (live_range_end [vreg]) {
14536 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14538 ins->inst_c1 = vreg;
14539 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14540 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14542 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14547 if (cfg->gsharedvt_locals_var_ins) {
14548 /* Nullify if unused */
14549 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14550 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14553 g_free (live_range_start);
14554 g_free (live_range_end);
14555 g_free (live_range_start_bb);
14556 g_free (live_range_end_bb);
14561 * - use 'iadd' instead of 'int_add'
14562 * - handling ovf opcodes: decompose in method_to_ir.
14563 * - unify iregs/fregs
14564 * -> partly done, the missing parts are:
14565 * - a more complete unification would involve unifying the hregs as well, so
14566 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14567 * would no longer map to the machine hregs, so the code generators would need to
14568 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14569 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14570 * fp/non-fp branches speeds it up by about 15%.
14571 * - use sext/zext opcodes instead of shifts
14573 * - get rid of TEMPLOADs if possible and use vregs instead
14574 * - clean up usage of OP_P/OP_ opcodes
14575 * - cleanup usage of DUMMY_USE
14576 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14578 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14579 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14580 * - make sure handle_stack_args () is called before the branch is emitted
14581 * - when the new IR is done, get rid of all unused stuff
14582 * - COMPARE/BEQ as separate instructions or unify them ?
14583 * - keeping them separate allows specialized compare instructions like
14584 * compare_imm, compare_membase
14585 * - most back ends unify fp compare+branch, fp compare+ceq
14586 * - integrate mono_save_args into inline_method
14587 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14588 * - handle long shift opts on 32 bit platforms somehow: they require
14589 * 3 sregs (2 for arg1 and 1 for arg2)
14590 * - make byref a 'normal' type.
14591 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14592 * variable if needed.
14593 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14594 * like inline_method.
14595 * - remove inlining restrictions
14596 * - fix LNEG and enable cfold of INEG
14597 * - generalize x86 optimizations like ldelema as a peephole optimization
14598 * - add store_mem_imm for amd64
14599 * - optimize the loading of the interruption flag in the managed->native wrappers
14600 * - avoid special handling of OP_NOP in passes
14601 * - move code inserting instructions into one function/macro.
14602 * - try a coalescing phase after liveness analysis
14603 * - add float -> vreg conversion + local optimizations on !x86
14604 * - figure out how to handle decomposed branches during optimizations, ie.
14605 * compare+branch, op_jump_table+op_br etc.
14606 * - promote RuntimeXHandles to vregs
14607 * - vtype cleanups:
14608 * - add a NEW_VARLOADA_VREG macro
14609 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14610 * accessing vtype fields.
14611 * - get rid of I8CONST on 64 bit platforms
14612 * - dealing with the increase in code size due to branches created during opcode
14614 * - use extended basic blocks
14615 * - all parts of the JIT
14616 * - handle_global_vregs () && local regalloc
14617 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14618 * - sources of increase in code size:
14621 * - isinst and castclass
14622 * - lvregs not allocated to global registers even if used multiple times
14623 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14625 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14626 * - add all micro optimizations from the old JIT
14627 * - put tree optimizations into the deadce pass
14628 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14629 * specific function.
14630 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14631 * fcompare + branchCC.
14632 * - create a helper function for allocating a stack slot, taking into account
14633 * MONO_CFG_HAS_SPILLUP.
14635 * - merge the ia64 switch changes.
14636 * - optimize mono_regstate2_alloc_int/float.
14637 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14638 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14639 * parts of the tree could be separated by other instructions, killing the tree
14640 * arguments, or stores killing loads etc. Also, should we fold loads into other
14641 * instructions if the result of the load is used multiple times ?
14642 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14643 * - LAST MERGE: 108395.
14644 * - when returning vtypes in registers, generate IR and append it to the end of the
14645 * last bb instead of doing it in the epilog.
14646 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14654 - When to decompose opcodes:
14655 - earlier: this makes some optimizations hard to implement, since the low level IR
14656 no longer contains the necessary information. But it is easier to do.
14657 - later: harder to implement, enables more optimizations.
14658 - Branches inside bblocks:
14659 - created when decomposing complex opcodes.
14660 - branches to another bblock: harmless, but not tracked by the branch
14661 optimizations, so need to branch to a label at the start of the bblock.
14662 - branches to inside the same bblock: very problematic, trips up the local
14663 reg allocator. Can be fixed by splitting the current bblock, but that is a
14664 complex operation, since some local vregs can become global vregs etc.
14665 - Local/global vregs:
14666 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14667 local register allocator.
14668 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14669 structure, created by mono_create_var (). Assigned to hregs or the stack by
14670 the global register allocator.
14671 - When to do optimizations like alu->alu_imm:
14672 - earlier -> saves work later on since the IR will be smaller/simpler
14673 - later -> can work on more instructions
14674 - Handling of valuetypes:
14675 - When a vtype is pushed on the stack, a new temporary is created, an
14676 instruction computing its address (LDADDR) is emitted and pushed on
14677 the stack. Need to optimize cases when the vtype is used immediately as in
14678 argument passing, stloc etc.
14679 - Instead of the to_end stuff in the old JIT, simply call the function handling
14680 the values on the stack before emitting the last instruction of the bb.
14683 #endif /* DISABLE_JIT */