2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/mono-debug-debugger.h>
53 #include <mono/metadata/gc-internal.h>
54 #include <mono/metadata/security-manager.h>
55 #include <mono/metadata/threads-types.h>
56 #include <mono/metadata/security-core-clr.h>
57 #include <mono/metadata/monitor.h>
58 #include <mono/metadata/profiler-private.h>
59 #include <mono/metadata/profiler.h>
60 #include <mono/metadata/debug-mono-symfile.h>
61 #include <mono/utils/mono-compiler.h>
62 #include <mono/utils/mono-memory-model.h>
63 #include <mono/metadata/mono-basic-block.h>
69 #include "jit-icalls.h"
71 #include "debugger-agent.h"
72 #include "seq-points.h"
/* Inliner cost heuristics (consumed by inline_method, declared above). */
74 #define BRANCH_COST 10
75 #define INLINE_LENGTH_LIMIT 20
77 /* These have 'cfg' as an implicit argument */
/* Abort inlining of the current method: record the failure and bail out to
 * exception_exit, but only when we are actually inlining (cfg->method !=
 * cfg->current_method) and the current method is not a wrapper. */
78 #define INLINE_FAILURE(msg) do { \
79 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
80 inline_failure (cfg, msg); \
81 goto exception_exit; \
/* Bail out to exception_exit if a compile-time exception has been recorded. */
84 #define CHECK_CFG_EXCEPTION do {\
85 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
86 goto exception_exit; \
/* Record a method-access violation (see method_access_failure below) and bail. */
88 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
89 method_access_failure ((cfg), (method), (cmethod)); \
90 goto exception_exit; \
/* Record a field-access violation (see field_access_failure below) and bail. */
92 #define FIELD_ACCESS_FAILURE(method, field) do { \
93 field_access_failure ((cfg), (method), (field)); \
94 goto exception_exit; \
/* Generic sharing is impossible for OPCODE; record the failure with the
 * originating source location and bail out. */
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
99 goto exception_exit; \
/* Same as above, but only applies when compiling a gsharedvt method. */
102 #define GSHAREDVT_FAILURE(opcode) do { \
103 if (cfg->gsharedvt) { \
104 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
105 goto exception_exit; \
106 #define OUT_OF_MEMORY_FAILURE do { \
109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
110 goto exception_exit; \
/* Disable AOT compilation for this method, logging why when verbose. */
112 #define DISABLE_AOT(cfg) do { \
113 if ((cfg)->verbose_level >= 2) \
114 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
115 (cfg)->disable_aot = TRUE; \
/* Record a type-load failure; break_on_unverified () gives debuggers a hook. */
117 #define LOAD_ERROR do { \
118 break_on_unverified (); \
119 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
120 goto exception_exit; \
/* Like LOAD_ERROR, but also remembers the offending class in exception_ptr. */
123 #define TYPE_LOAD_ERROR(klass) do { \
124 cfg->exception_ptr = klass; \
/* Convert a pending MonoError on the cfg into a compile exception and bail. */
128 #define CHECK_CFG_ERROR do {\
129 if (!mono_error_ok (&cfg->error)) { \
130 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
131 goto mono_error_exit; \
135 /* Determine whenever 'ins' represents a load of the 'this' argument */
136 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for helpers defined later in this translation unit. */
138 static int ldind_to_load_membase (int opcode);
139 static int stind_to_store_membase (int opcode);
141 int mono_op_to_op_imm (int opcode);
142 int mono_op_to_op_imm_noemul (int opcode);
144 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
146 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
147 guchar *ip, guint real_offset, gboolean inline_always);
149 /* helper methods signatures */
/* Shared icall signatures; populated once by mono_create_helper_signatures (). */
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
152 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
153 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
154 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
157 * Instruction metadata
/* MINI_OP/MINI_OP3 are expanded against mini-ops.h to build per-opcode
 * metadata tables: first the dest/src register-kind characters ... */
165 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
166 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
172 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
177 /* keep in sync with the enum in mini.h */
180 #include "mini-ops.h"
/* ... then, after redefining the macros, the source-register counts. */
185 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
186 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
188 * This should contain the index of the last sreg + 1. This is not the same
189 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
191 const gint8 ins_sreg_counts[] = {
192 #include "mini-ops.h"
/* Initialize the liveness-range bookkeeping of a MonoMethodVar. */
197 #define MONO_INIT_VARINFO(vi,id) do { \
198 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the static alloc_* virtual-register allocators,
 * so code outside this file can allocate int/long/float/pointer/dest vregs.
 */
204 mono_alloc_ireg (MonoCompile *cfg)
206 return alloc_ireg (cfg);
210 mono_alloc_lreg (MonoCompile *cfg)
212 return alloc_lreg (cfg);
216 mono_alloc_freg (MonoCompile *cfg)
218 return alloc_freg (cfg);
222 mono_alloc_preg (MonoCompile *cfg)
224 return alloc_preg (cfg);
228 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
230 return alloc_dreg (cfg, stack_type);
234 * mono_alloc_ireg_ref:
236 * Allocate an IREG, and mark it as holding a GC ref.
239 mono_alloc_ireg_ref (MonoCompile *cfg)
241 return alloc_ireg_ref (cfg);
245 * mono_alloc_ireg_mp:
247 * Allocate an IREG, and mark it as holding a managed pointer.
250 mono_alloc_ireg_mp (MonoCompile *cfg)
252 return alloc_ireg_mp (cfg);
256 * mono_alloc_ireg_copy:
258 * Allocate an IREG with the same GC type as VREG.
261 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification (ref / managed ptr / plain int). */
263 if (vreg_is_ref (cfg, vreg))
264 return alloc_ireg_ref (cfg);
265 else if (vreg_is_mp (cfg, vreg))
266 return alloc_ireg_mp (cfg);
268 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 * Return the OP_*MOVE opcode used to copy a value of TYPE between vregs.
 * Enums and generic instances are resolved to their underlying type and
 * the lookup is retried; unknown types abort via g_error ().
 */
272 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
277 type = mini_get_underlying_type (type);
279 switch (type->type) {
292 case MONO_TYPE_FNPTR:
294 case MONO_TYPE_CLASS:
295 case MONO_TYPE_STRING:
296 case MONO_TYPE_OBJECT:
297 case MONO_TYPE_SZARRAY:
298 case MONO_TYPE_ARRAY:
302 #if SIZEOF_REGISTER == 8
/* With soft-float-style r4 handling, R4 values use RMOVE instead of FMOVE. */
308 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
311 case MONO_TYPE_VALUETYPE:
312 if (type->data.klass->enumtype) {
313 type = mono_class_enum_basetype (type->data.klass);
316 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
319 case MONO_TYPE_TYPEDBYREF:
321 case MONO_TYPE_GENERICINST:
/* Retry with the generic container's open type. */
322 type = &type->data.generic_class->container_class->byval_arg;
326 g_assert (cfg->gshared);
327 if (mini_type_var_is_vt (type))
330 return mono_type_to_regmove (cfg, mini_get_underlying_type (type));
332 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug helper: dump a basic block's in/out edges and every instruction
 * in it to stdout, prefixed with MSG.
 */
338 mono_print_bb (MonoBasicBlock *bb, const char *msg)
343 printf ("\n%s %d: [IN: ", msg, bb->block_num);
344 for (i = 0; i < bb->in_count; ++i)
345 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
347 for (i = 0; i < bb->out_count; ++i)
348 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
350 for (tree = bb->code; tree; tree = tree->next)
351 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 * One-time initialization of the shared icall signatures declared at the
 * top of this file.
 */
355 mono_create_helper_signatures (void)
357 helper_sig_domain_get = mono_create_icall_signature ("ptr");
358 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
359 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
360 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
361 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/* Debugger hook: trap here when --break-on-unverified is enabled. */
364 static MONO_NEVER_INLINE void
365 break_on_unverified (void)
367 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 * Record a MONO_EXCEPTION_METHOD_ACCESS on CFG with a message naming
 * both the caller and the inaccessible callee.
 */
371 static MONO_NEVER_INLINE void
372 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
374 char *method_fname = mono_method_full_name (method, TRUE);
375 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
376 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
377 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
378 g_free (method_fname);
379 g_free (cil_method_fname);
/*
 * field_access_failure:
 * Same as method_access_failure, but for an inaccessible field.
 */
382 static MONO_NEVER_INLINE void
383 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
385 char *method_fname = mono_method_full_name (method, TRUE);
386 char *field_fname = mono_field_full_name (field);
387 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
388 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
389 g_free (method_fname);
390 g_free (field_fname);
/*
 * inline_failure:
 * Mark the compilation as a failed inline attempt (MONO_EXCEPTION_INLINE_FAILED),
 * logging MSG when verbose. Invoked via the INLINE_FAILURE macro.
 */
393 static MONO_NEVER_INLINE void
394 inline_failure (MonoCompile *cfg, const char *msg)
396 if (cfg->verbose_level >= 2)
397 printf ("inline failed: %s\n", msg);
398 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * gshared_failure:
 * Record that the current method cannot be compiled with generic sharing
 * (MONO_EXCEPTION_GENERIC_SHARING_FAILED), optionally logging the
 * offending opcode and method when verbose. Invoked through the
 * GENERIC_SHARING_FAILURE macro, which supplies __FILE__/__LINE__.
 *
 * NOTE(review): the 'file' parameter is accepted but not printed here
 * (only 'line' is) — presumably an oversight when this was converted from
 * macro form; confirm before changing the log format.
 */
401 static MONO_NEVER_INLINE void
402 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
/* Fixed: dropped a stray trailing '\' line-continuation after the 'if'
 * condition, a leftover from the macro version of this code. It was
 * harmless (line splicing), but misleading in a plain function. */
404 if (cfg->verbose_level > 2)
405 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), line);
406 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 * Record a gsharedvt compile failure, storing a detailed message (method,
 * opcode, source location) in cfg->exception_message.
 */
409 static MONO_NEVER_INLINE void
410 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
412 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
413 if (cfg->verbose_level >= 2)
414 printf ("%s\n", cfg->exception_message);
415 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
419 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
420 * foo<T> (int i) { ldarg.0; box T; }
/* Handle unverifiable IL: for gsharedvt methods fall back to per-instantiation
 * compilation; otherwise trap for the debugger and mark the method invalid. */
422 #define UNVERIFIED do { \
423 if (cfg->gsharedvt) { \
424 if (cfg->verbose_level > 2) \
425 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
426 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
427 goto exception_exit; \
429 break_on_unverified (); \
/* Look up (or lazily create and register) the basic block starting at IL
 * offset IP; out-of-range IPs are treated as unverifiable code. */
433 #define GET_BBLOCK(cfg,tblock,ip) do { \
434 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
436 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
437 NEW_BBLOCK (cfg, (tblock)); \
438 (tblock)->cil_code = (ip); \
439 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a freshly allocated managed-pointer vreg. */
443 #if defined(TARGET_X86) || defined(TARGET_AMD64)
444 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
445 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
446 (dest)->dreg = alloc_ireg_mp ((cfg)); \
447 (dest)->sreg1 = (sr1); \
448 (dest)->sreg2 = (sr2); \
449 (dest)->inst_imm = (imm); \
450 (dest)->backend.shift_amount = (shift); \
451 MONO_ADD_INS ((cfg)->cbb, (dest)); \
455 /* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 * If the two stack operands of INS differ in width/kind (r4 vs r8, or i4 vs
 * native ptr on 64-bit), emit the conversion instruction and rewrite the
 * operand pointers so both sides match. The spec allows mixing r4/r8.
 */
457 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
459 MonoInst *arg1 = *arg1_ref;
460 MonoInst *arg2 = *arg2_ref;
463 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
464 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
467 /* Mixing r4/r8 is allowed by the spec */
468 if (arg1->type == STACK_R4) {
469 int dreg = alloc_freg (cfg);
471 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
472 conv->type = STACK_R8;
476 if (arg2->type == STACK_R4) {
477 int dreg = alloc_freg (cfg);
479 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
480 conv->type = STACK_R8;
486 #if SIZEOF_REGISTER == 8
487 /* FIXME: Need to add many more cases */
/* On 64-bit, sign-extend an I4 operand mixed with a native pointer. */
488 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
491 int dr = alloc_preg (cfg);
492 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
493 (ins)->sreg2 = widen->dreg;
/* Pop two values, emit a typed binary op, push the result. Uses type_from_op
 * to pick the type-specific opcode and add_widen_op to reconcile operands. */
498 #define ADD_BINOP(op) do { \
499 MONO_INST_NEW (cfg, ins, (op)); \
501 ins->sreg1 = sp [0]->dreg; \
502 ins->sreg2 = sp [1]->dreg; \
503 type_from_op (cfg, ins, sp [0], sp [1]); \
505 /* Have to insert a widening op */ \
506 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
507 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
508 MONO_ADD_INS ((cfg)->cbb, (ins)); \
509 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one value, emit a typed unary op, push the result. */
512 #define ADD_UNOP(op) do { \
513 MONO_INST_NEW (cfg, ins, (op)); \
515 ins->sreg1 = sp [0]->dreg; \
516 type_from_op (cfg, ins, sp [0], NULL); \
518 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
519 MONO_ADD_INS ((cfg)->cbb, (ins)); \
520 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair, wiring up the true/false basic
 * blocks (falling through to next_block when given, else to the block at ip). */
523 #define ADD_BINCOND(next_block) do { \
526 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
527 cmp->sreg1 = sp [0]->dreg; \
528 cmp->sreg2 = sp [1]->dreg; \
529 type_from_op (cfg, cmp, sp [0], sp [1]); \
531 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
532 type_from_op (cfg, ins, sp [0], sp [1]); \
533 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
534 GET_BBLOCK (cfg, tblock, target); \
535 link_bblock (cfg, cfg->cbb, tblock); \
536 ins->inst_true_bb = tblock; \
537 if ((next_block)) { \
538 link_bblock (cfg, cfg->cbb, (next_block)); \
539 ins->inst_false_bb = (next_block); \
540 start_new_bblock = 1; \
542 GET_BBLOCK (cfg, tblock, ip); \
543 link_bblock (cfg, cfg->cbb, tblock); \
544 ins->inst_false_bb = tblock; \
545 start_new_bblock = 2; \
547 if (sp != stack_start) { \
548 handle_stack_args (cfg, stack_start, sp - stack_start); \
549 CHECK_UNVERIFIABLE (cfg); \
551 MONO_ADD_INS (cfg->cbb, cmp); \
552 MONO_ADD_INS (cfg->cbb, ins); \
556 * link_bblock: Links two basic blocks
558 * links two basic blocks in the control flow graph, the 'from'
559 * argument is the starting block and the 'to' argument is the block
560 * the control flow ends to after 'from'.
563 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
565 MonoBasicBlock **newa;
569 if (from->cil_code) {
/* NOTE(review): "IL%04x" below is missing the underscore used by the other
 * edge-trace messages ("IL_%04x") — looks like a typo in the debug output;
 * confirm before normalizing. */
571 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
573 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
576 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
578 printf ("edge from entry to exit\n");
/* Skip if the edge already exists in from's successor list. */
583 for (i = 0; i < from->out_count; ++i) {
584 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool-allocated, so the old array is leaked
 * into the pool, not freed). */
590 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
591 for (i = 0; i < from->out_count; ++i) {
592 newa [i] = from->out_bb [i];
/* Mirror the edge in to's predecessor list, same dedup + grow pattern. */
600 for (i = 0; i < to->in_count; ++i) {
601 if (from == to->in_bb [i]) {
607 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
608 for (i = 0; i < to->in_count; ++i) {
609 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock above. */
618 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
620 link_bblock (cfg, from, to);
624 * mono_find_block_region:
626 * We mark each basic block with a region ID. We use that to avoid BB
627 * optimizations when blocks are in different regions.
630 * A region token that encodes where this region is, and information
631 * about the clause owner for this block.
633 * The region encodes the try/catch/filter clause that owns this block
634 * as well as the type. -1 is a special value that represents a block
635 * that is in none of try/catch/filter.
638 mono_find_block_region (MonoCompile *cfg, int offset)
640 MonoMethodHeader *header = cfg->header;
641 MonoExceptionClause *clause;
/* First pass: handler (filter/finally/fault/catch) regions take priority. */
644 for (i = 0; i < header->num_clauses; ++i) {
645 clause = &header->clauses [i];
646 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
647 (offset < (clause->handler_offset)))
648 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
650 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
651 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
652 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
653 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
654 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
656 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Second pass: otherwise report the enclosing try clause, if any. */
659 for (i = 0; i < header->num_clauses; ++i) {
660 clause = &header->clauses [i];
662 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
663 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect (as a GList) the clauses of kind TYPE whose protected range
 * contains IP but not TARGET, i.e. the handlers that a branch from IP to
 * TARGET would leave.
 */
670 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
672 MonoMethodHeader *header = cfg->header;
673 MonoExceptionClause *clause;
677 for (i = 0; i < header->num_clauses; ++i) {
678 clause = &header->clauses [i];
679 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
680 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
681 if (clause->flags == type)
682 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 * Get or lazily create the per-region stack-pointer variable, cached in
 * cfg->spvars. Marked MONO_INST_VOLATILE so the register allocator leaves
 * it on the stack.
 */
689 mono_create_spvar_for_region (MonoCompile *cfg, int region)
693 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
697 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
698 /* prevent it from being register allocated */
699 var->flags |= MONO_INST_VOLATILE;
701 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception variable for a handler at OFFSET, or NULL. */
705 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
707 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Get or lazily create the exception-object variable for the handler at
 * OFFSET, cached in cfg->exvars; also kept out of registers.
 */
711 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
715 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
719 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
720 /* prevent it from being register allocated */
721 var->flags |= MONO_INST_VOLATILE;
723 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
729 * Returns the type used in the eval stack when @type is loaded.
730 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
733 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
737 type = mini_get_underlying_type (type);
738 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack. */
740 inst->type = STACK_MP;
745 switch (type->type) {
747 inst->type = STACK_INV;
755 inst->type = STACK_I4;
760 case MONO_TYPE_FNPTR:
761 inst->type = STACK_PTR;
763 case MONO_TYPE_CLASS:
764 case MONO_TYPE_STRING:
765 case MONO_TYPE_OBJECT:
766 case MONO_TYPE_SZARRAY:
767 case MONO_TYPE_ARRAY:
768 inst->type = STACK_OBJ;
772 inst->type = STACK_I8;
775 inst->type = cfg->r4_stack_type;
778 inst->type = STACK_R8;
780 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their base type and fall through for reclassification. */
781 if (type->data.klass->enumtype) {
782 type = mono_class_enum_basetype (type->data.klass);
786 inst->type = STACK_VTYPE;
789 case MONO_TYPE_TYPEDBYREF:
790 inst->klass = mono_defaults.typed_reference_class;
791 inst->type = STACK_VTYPE;
793 case MONO_TYPE_GENERICINST:
/* Retry with the generic container's open type. */
794 type = &type->data.generic_class->container_class->byval_arg;
798 g_assert (cfg->gshared);
/* gsharedvt type variables are treated as value types on the stack. */
799 if (mini_is_gsharedvt_type (type)) {
800 g_assert (cfg->gsharedvt);
801 inst->type = STACK_VTYPE;
803 type_to_eval_stack_type (cfg, mini_get_underlying_type (type), inst);
807 g_error ("unknown type 0x%02x in eval stack type", type->type);
812 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of arithmetic binops, indexed [lhs stack type][rhs stack type].
 * Row/column order follows the STACK_* enum; STACK_INV marks invalid IL pairs. */
815 bin_num_table [STACK_MAX] [STACK_MAX] = {
816 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
817 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
818 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
819 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
820 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
821 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
822 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* Result type of a neg/not-style unop, indexed by operand stack type. */
829 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
832 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor, etc.). */
834 bin_int_table [STACK_MAX] [STACK_MAX] = {
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability of stack-type pairs; 0 = invalid, non-zero codes distinguish
 * the allowed (and conditionally-allowed) comparisons. */
846 bin_comp_table [STACK_MAX] [STACK_MAX] = {
847 /* Inv i L p F & O vt r4 */
849 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
850 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
851 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
852 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
853 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
854 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
855 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
856 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
859 /* reduce the size of this table */
/* Result type of shift ops: value type from lhs, count must be i4/native int. */
861 shift_table [STACK_MAX] [STACK_MAX] = {
862 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
863 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
864 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
865 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
866 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
867 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
868 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
873 * Tables to map from the non-specific opcode to the matching
874 * type-specific opcode.
/* Each entry is the (type-specific opcode - generic opcode) delta added to
 * ins->opcode in type_from_op, indexed by the operand's stack type. */
876 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
878 binops_op_map [STACK_MAX] = {
879 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
882 /* handles from CEE_NEG to CEE_CONV_U8 */
884 unops_op_map [STACK_MAX] = {
885 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
888 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
890 ovfops_op_map [STACK_MAX] = {
891 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
894 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
896 ovf2ops_op_map [STACK_MAX] = {
897 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
900 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
902 ovf3ops_op_map [STACK_MAX] = {
903 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
906 /* handles from CEE_BEQ to CEE_BLT_UN */
908 beqops_op_map [STACK_MAX] = {
909 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
912 /* handles from CEE_CEQ to CEE_CLT_UN */
914 ceqops_op_map [STACK_MAX] = {
915 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
919 * Sets ins->type (the type on the eval stack) according to the
920 * type of the opcode and the arguments to it.
921 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
923 * FIXME: this function sets ins->type unconditionally in some cases, but
924 * it should set it to invalid for some types (a conv.x on an object)
/* Also specializes the generic opcode in-place by adding the per-stack-type
 * deltas from the *_op_map tables defined above. */
927 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
929 switch (ins->opcode) {
936 /* FIXME: check unverifiable args for STACK_MP */
937 ins->type = bin_num_table [src1->type] [src2->type];
938 ins->opcode += binops_op_map [ins->type];
945 ins->type = bin_int_table [src1->type] [src2->type];
946 ins->opcode += binops_op_map [ins->type];
951 ins->type = shift_table [src1->type] [src2->type];
952 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/R/F/I variant from the first operand's stack type. */
957 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
958 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
959 ins->opcode = OP_LCOMPARE;
960 else if (src1->type == STACK_R4)
961 ins->opcode = OP_RCOMPARE;
962 else if (src1->type == STACK_R8)
963 ins->opcode = OP_FCOMPARE;
965 ins->opcode = OP_ICOMPARE;
967 case OP_ICOMPARE_IMM:
968 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
969 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
970 ins->opcode = OP_LCOMPARE_IMM;
982 ins->opcode += beqops_op_map [src1->type];
985 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
986 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares: only odd comparability codes are valid (& 1). */
992 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
993 ins->opcode += ceqops_op_map [src1->type];
997 ins->type = neg_table [src1->type];
998 ins->opcode += unops_op_map [ins->type];
1001 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1002 ins->type = src1->type;
1004 ins->type = STACK_INV;
1005 ins->opcode += unops_op_map [ins->type];
1011 ins->type = STACK_I4;
1012 ins->opcode += unops_op_map [src1->type];
1015 ins->type = STACK_R8;
1016 switch (src1->type) {
1019 ins->opcode = OP_ICONV_TO_R_UN;
1022 ins->opcode = OP_LCONV_TO_R_UN;
1026 case CEE_CONV_OVF_I1:
1027 case CEE_CONV_OVF_U1:
1028 case CEE_CONV_OVF_I2:
1029 case CEE_CONV_OVF_U2:
1030 case CEE_CONV_OVF_I4:
1031 case CEE_CONV_OVF_U4:
1032 ins->type = STACK_I4;
1033 ins->opcode += ovf3ops_op_map [src1->type];
1035 case CEE_CONV_OVF_I_UN:
1036 case CEE_CONV_OVF_U_UN:
1037 ins->type = STACK_PTR;
1038 ins->opcode += ovf2ops_op_map [src1->type];
1040 case CEE_CONV_OVF_I1_UN:
1041 case CEE_CONV_OVF_I2_UN:
1042 case CEE_CONV_OVF_I4_UN:
1043 case CEE_CONV_OVF_U1_UN:
1044 case CEE_CONV_OVF_U2_UN:
1045 case CEE_CONV_OVF_U4_UN:
1046 ins->type = STACK_I4;
1047 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized result; opcode depends on the source width. */
1050 ins->type = STACK_PTR;
1051 switch (src1->type) {
1053 ins->opcode = OP_ICONV_TO_U;
1057 #if SIZEOF_VOID_P == 8
1058 ins->opcode = OP_LCONV_TO_U;
1060 ins->opcode = OP_MOVE;
1064 ins->opcode = OP_LCONV_TO_U;
1067 ins->opcode = OP_FCONV_TO_U;
1073 ins->type = STACK_I8;
1074 ins->opcode += unops_op_map [src1->type];
1076 case CEE_CONV_OVF_I8:
1077 case CEE_CONV_OVF_U8:
1078 ins->type = STACK_I8;
1079 ins->opcode += ovf3ops_op_map [src1->type];
1081 case CEE_CONV_OVF_U8_UN:
1082 case CEE_CONV_OVF_I8_UN:
1083 ins->type = STACK_I8;
1084 ins->opcode += ovf2ops_op_map [src1->type];
1087 ins->type = cfg->r4_stack_type;
1088 ins->opcode += unops_op_map [src1->type];
1091 ins->type = STACK_R8;
1092 ins->opcode += unops_op_map [src1->type];
1095 ins->type = STACK_R8;
1099 ins->type = STACK_I4;
1100 ins->opcode += ovfops_op_map [src1->type];
1103 case CEE_CONV_OVF_I:
1104 case CEE_CONV_OVF_U:
1105 ins->type = STACK_PTR;
1106 ins->opcode += ovfops_op_map [src1->type];
1109 case CEE_ADD_OVF_UN:
1111 case CEE_MUL_OVF_UN:
1113 case CEE_SUB_OVF_UN:
/* Overflow-checked arithmetic is not defined for floats. */
1114 ins->type = bin_num_table [src1->type] [src2->type];
1115 ins->opcode += ovfops_op_map [src1->type];
1116 if (ins->type == STACK_R8)
1117 ins->type = STACK_INV;
1119 case OP_LOAD_MEMBASE:
1120 ins->type = STACK_PTR;
1122 case OP_LOADI1_MEMBASE:
1123 case OP_LOADU1_MEMBASE:
1124 case OP_LOADI2_MEMBASE:
1125 case OP_LOADU2_MEMBASE:
1126 case OP_LOADI4_MEMBASE:
1127 case OP_LOADU4_MEMBASE:
1128 ins->type = STACK_PTR;
1130 case OP_LOADI8_MEMBASE:
1131 ins->type = STACK_I8;
1133 case OP_LOADR4_MEMBASE:
1134 ins->type = cfg->r4_stack_type;
1136 case OP_LOADR8_MEMBASE:
1137 ins->type = STACK_R8;
1140 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers without better information default to object class. */
1144 if (ins->type == STACK_MP)
1145 ins->klass = mono_defaults.object_class;
/* Maps (entries visible here) onto the STACK_* classification used above. */
1150 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1156 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Validate that the stack values in ARGS are compatible with SIG's
 * parameter types (byref-ness, reference kinds, float widths).
 */
1161 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1165 switch (args->type) {
1175 for (i = 0; i < sig->param_count; ++i) {
1176 switch (args [i].type) {
1180 if (!sig->params [i]->byref)
1184 if (sig->params [i]->byref)
1186 switch (sig->params [i]->type) {
1187 case MONO_TYPE_CLASS:
1188 case MONO_TYPE_STRING:
1189 case MONO_TYPE_OBJECT:
1190 case MONO_TYPE_SZARRAY:
1191 case MONO_TYPE_ARRAY:
1198 if (sig->params [i]->byref)
1200 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1209 /*if (!param_table [args [i].type] [sig->params [i]->type])
1217 * When we need a pointer to the current domain many times in a method, we
1218 * call mono_domain_get() once and we store the result in a local variable.
1219 * This function returns the variable that represents the MonoDomain*.
1221 inline static MonoInst *
1222 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached MonoDomain* local on first use. */
1224 if (!cfg->domainvar)
1225 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1226 return cfg->domainvar;
1230 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT-address variable; only meaningful when compiling AOT
 * on architectures that define MONO_ARCH_NEED_GOT_VAR. */
1234 mono_get_got_var (MonoCompile *cfg)
1236 #ifdef MONO_ARCH_NEED_GOT_VAR
1237 if (!cfg->compile_aot)
1239 if (!cfg->got_var) {
1240 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1242 return cfg->got_var;
/* Lazily create the rgctx/vtable variable used by shared generic code;
 * forced stack-allocated (MONO_INST_VOLATILE). Requires cfg->gshared. */
1249 mono_get_vtable_var (MonoCompile *cfg)
1251 g_assert (cfg->gshared);
1253 if (!cfg->rgctx_var) {
1254 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1255 /* force the var to be stack allocated */
1256 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1259 return cfg->rgctx_var;
/* Map an instruction's eval-stack type back to a MonoType (inverse of
 * type_to_eval_stack_type for the simple cases). */
1263 type_from_stack_type (MonoInst *ins) {
1264 switch (ins->type) {
1265 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1266 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1267 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1268 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1269 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers report the pointee class's this_arg (byref) type. */
1271 return &ins->klass->this_arg;
1272 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1273 case STACK_VTYPE: return &ins->klass->byval_arg;
1275 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Classify a MonoType into its STACK_* eval-stack category. */
1280 static G_GNUC_UNUSED int
1281 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1283 t = mono_type_get_underlying_type (t);
1295 case MONO_TYPE_FNPTR:
1297 case MONO_TYPE_CLASS:
1298 case MONO_TYPE_STRING:
1299 case MONO_TYPE_OBJECT:
1300 case MONO_TYPE_SZARRAY:
1301 case MONO_TYPE_ARRAY:
1307 return cfg->r4_stack_type;
1310 case MONO_TYPE_VALUETYPE:
1311 case MONO_TYPE_TYPEDBYREF:
1313 case MONO_TYPE_GENERICINST:
1314 if (mono_type_generic_inst_is_valuetype (t))
1320 g_assert_not_reached ();
/* Map a CIL array-access opcode (ldelem.*/stelem.*) to the element MonoClass.
 * NOTE(review): most case labels are elided in this listing; only the
 * CEE_LDELEM_REF/CEE_STELEM_REF labels are visible here. */
array_access_to_klass (int opcode)
		return mono_defaults.byte_class;
		return mono_defaults.uint16_class;
		return mono_defaults.int_class;
		return mono_defaults.sbyte_class;
		return mono_defaults.int16_class;
		return mono_defaults.int32_class;
		return mono_defaults.uint32_class;
		return mono_defaults.int64_class;
		return mono_defaults.single_class;
		return mono_defaults.double_class;
	case CEE_LDELEM_REF:
	case CEE_STELEM_REF:
		return mono_defaults.object_class;
	g_assert_not_reached ();
/*
 * We try to share variables when possible: one cached variable per
 * (stack slot, stack type) pair, indexed through cfg->intvars.
 */
mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
	/* inlining can result in deeper stacks */
	if (slot >= cfg->header->max_stack)
		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);

	/* Index into the shared-variable cache by slot and stack type. */
	pos = ins->type - 1 + slot * STACK_MAX;

	switch (ins->type) {
		/* Reuse a previously created var for this slot/type if any. */
		if ((vnum = cfg->intvars [pos]))
			return cfg->varinfo [vnum];
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
		cfg->intvars [pos] = res->inst_c0;
		/* Non-cacheable stack types get a fresh variable. */
		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record, for AOT compilation, the image+token used to create KEY so the
 * AOT compiler can later re-resolve it. */
mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
	/*
	 * Don't use this if a generic_context is set, since that means AOT can't
	 * look up the method using just the image+token.
	 * table == 0 means this is a reference made from a wrapper.
	 */
	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
		jump_info_token->image = image;
		jump_info_token->token = token;
		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/*
 * This function is called to handle items that are left on the evaluation stack
 * at basic block boundaries. What happens is that we save the values to local variables
 * and we reload them later when first entering the target basic block (with the
 * handle_loaded_temps () function).
 * A single joint point will use the same variables (stored in the array bb->out_stack or
 * bb->in_stack, if the basic block is before or after the joint point).
 *
 * This function needs to be called _before_ emitting the last instruction of
 * the bb (i.e. before emitting a branch).
 * If the stack merge fails at a join point, cfg->unverifiable is set.
 */
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;

	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
	if (!bb->out_scount) {
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		/* First try to reuse the in_stack of an already-processed successor. */
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				bb->out_stack = outb->in_stack;
		/* No successor provided an in_stack: allocate fresh out_stack temps. */
		bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
		for (i = 0; i < count; ++i) {
			/*
			 * try to reuse temps already allocated for this purpouse, if they occupy the same
			 * stack slot and if they are of the same type.
			 * This won't cause conflicts since if 'local' is used to
			 * store one of the values in the in_stack of a bblock, then
			 * the same variable will be used for the same outgoing stack
			 * slot as well.
			 * This doesn't work when inlining methods, since the bblocks
			 * in the inlined methods do not inherit their in_stack from
			 * the bblock they are inlined to. See bug #58863 for an
			 * example.
			 */
			if (cfg->inlined_method)
				bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);

	/* Propagate this block's out_stack into every successor's in_stack. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				/* stack-depth mismatch at a join point */
				cfg->unverifiable = TRUE;
			continue; /* check they are the same locals */
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;

	locals = bb->out_stack;
	/* Spill the live stack items into the shared temporaries. */
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 * in_stacks.
	 */
	/* Find a bblock which has a different in_stack */
	while (bindex < bb->out_count) {
		outb = bb->out_bb [bindex];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER) {
		if (outb->in_stack != locals) {
			for (i = 0; i < count; ++i) {
				EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
				inst->cil_code = sp [i]->cil_code;
				sp [i] = locals [i];
				if (cfg->verbose_level > 3)
					printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
			locals = outb->in_stack;
/* Emit IR that loads into intf_bit_reg a nonzero value iff the interface
 * bitmap located at [base_reg + offset] has the bit for klass's interface
 * id set. Two codepaths: a runtime-helper call when the bitmap is
 * compressed, and direct byte load + mask otherwise. */
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	/* Compressed bitmaps are matched via the mono_class_interface_match icall;
	 * the IID is an AOT patch when compiling ahead-of-time. */
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);

	if (cfg->compile_aot) {
		/* IID only known at load time: compute byte index (iid >> 3) and
		 * bit mask (1 << (iid & 7)) in the emitted code itself. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
		/* JIT path: IID is a compile-time constant, fold index and mask. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * Emit code which checks whenever the interface id of @klass is smaller than
 * than the value given by max_iid_reg; on failure either branch to
 * false_target or throw InvalidCastException (branch line elided here).
 */
mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
	if (cfg->compile_aot) {
		/* IID resolved at load time under AOT. */
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
	/* With a false_target we branch; otherwise raise InvalidCastException. */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Same as above, but obtains max_iid from a vtable */
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Same as above, but obtains max_iid from a klass */
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an isinst-style subtype check of the class in klass_reg against
 * klass (or the runtime class in klass_ins), using the supertypes table;
 * branches to true_target on match, false_target on a failed depth check. */
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	mono_class_setup_supertypes (klass);

	/* If klass is deeper than the inline supertable, verify the candidate's
	 * inheritance depth is at least klass->idepth before indexing. */
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	/* Load supertypes [klass->idepth - 1] and compare it against klass. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		/* runtime-provided class instruction (gshared path) */
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check with a compile-time klass only. */
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emit an interface cast check against the vtable in vtable_reg:
 * max-iid range check, then interface-bitmap bit test. */
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
	/* With a true_target we branch on success; otherwise throw on failure. */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Variant of the above that takes a register to the class, not the vtable.
 */
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_bit_reg = alloc_preg (cfg);

	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
	/* With a true_target we branch on success; otherwise throw on failure. */
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
	MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact class-equality check of klass_reg against klass (or the
 * runtime class in klass_inst); throws InvalidCastException on mismatch. */
mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
		/* runtime-provided class (gshared path) */
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with a compile-time klass only. */
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
	mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare klass_reg against klass and branch to target with branch_op. */
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
	if (cfg->compile_aot) {
		/* klass pointer is an AOT patch, load it into a register first. */
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below is mutually recursive with it. */
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emit a castclass check of the class in klass_reg against klass (or the
 * runtime class in klass_inst); throws InvalidCastException on failure.
 * The array path checks rank and element class; the non-array path walks
 * the supertypes table. */
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
		/* --- array path (enclosing condition elided in this listing) --- */
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		/* Rank must match exactly. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			/* object[]: element must not be an enum-like valuetype. */
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

		/* --- non-array path: supertypes-table subtype check --- */
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		mono_class_setup_supertypes (klass);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with a compile-time klass only. */
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
	mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit inline IR to set SIZE bytes at [destreg + offset] to VAL.
 * Uses a single immediate store when size/alignment permit, otherwise a
 * sequence of register stores widest-first. Only val == 0 is supported. */
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
	g_assert (val == 0);

	/* Small, aligned sizes: a single immediate store of the right width. */
	if ((size <= SIZEOF_REGISTER) && (size <= align)) {
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
#if SIZEOF_REGISTER == 8
		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
	/* General case: materialize val in a register and store in chunks. */
	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	/* This could be optimized further if neccesary */
	/* Unaligned prefix: byte stores until aligned (loop structure elided). */
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
	/* Remaining tail: 4-, 2-, then 1-byte stores. */
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit inline IR to copy SIZE bytes from [srcreg + soffset] to
 * [destreg + doffset], widest chunks first, honoring alignment. */
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	/* This could be optimized further if neccesary */
	/* Unaligned prefix: byte copies until aligned (loop structure elided). */
	cur_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* 8-byte chunks on 64-bit targets. */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);

	/* Remaining tail: 4-, 2-, then 1-byte copies. */
	cur_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
	cur_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
	cur_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Emit IR to store sreg1 into the TLS slot identified by tls_key. */
emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
	if (cfg->compile_aot) {
		/* Offset is resolved at load time: pass it in a register. */
		EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
		MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
		ins->sreg2 = c->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		/* JIT path: offset is a compile-time constant. */
		MONO_INST_NEW (cfg, ins, OP_TLS_SET);
		ins->inst_offset = mini_get_tls_offset (tls_key);
		MONO_ADD_INS (cfg->cbb, ins);
/*
 * Emit IR to push the current LMF onto the LMF stack.
 */
emit_push_lmf (MonoCompile *cfg)
	/*
	 * Emit IR to push the LMF:
	 * lmf_addr = <lmf_addr from tls>
	 * lmf->lmf_addr = lmf_addr
	 * lmf->prev_lmf = *lmf_addr
	 * *lmf_addr = lmf
	 */
	int lmf_reg, prev_lmf_reg;
	MonoInst *ins, *lmf_ins;

	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
		/* Fast path: the current LMF is directly reachable through TLS. */
		/* Load current lmf */
		lmf_ins = mono_get_lmf_intrinsic (cfg);
		MONO_ADD_INS (cfg->cbb, lmf_ins);
		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
		lmf_reg = ins->dreg;
		/* Save previous_lmf */
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
		/* Set new LMF */
		emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
		/*
		 * Store lmf_addr in a variable, so it can be allocated to a global register.
		 */
		if (!cfg->lmf_addr_var)
			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);

		/* lmf_addr = &jit_tls->lmf via the jit_tls intrinsic, if available. */
		ins = mono_get_jit_tls_intrinsic (cfg);
			int jit_tls_dreg = ins->dreg;

			MONO_ADD_INS (cfg->cbb, ins);
			lmf_reg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
			/* Fallback: call the runtime helper. */
			lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
		lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
			MONO_ADD_INS (cfg->cbb, lmf_ins);
		MonoInst *args [16], *jit_tls_ins, *ins;

		/* Inline mono_get_lmf_addr () */
		/* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */

		/* Load mono_jit_tls_id */
		if (cfg->compile_aot)
			EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
			EMIT_NEW_ICONST (cfg, args [0], mono_jit_tls_id);
		/* call pthread_getspecific () */
		jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
		/* lmf_addr = &jit_tls->lmf */
		EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
		lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);

		lmf_ins->dreg = cfg->lmf_addr_var->dreg;

		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
		lmf_reg = ins->dreg;

		prev_lmf_reg = alloc_preg (cfg);
		/* Save previous_lmf */
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
		/* Set new lmf: *lmf_addr = lmf */
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
/*
 * Emit IR to pop the current LMF from the LMF stack.
 */
emit_pop_lmf (MonoCompile *cfg)
	int lmf_reg, lmf_addr_reg, prev_lmf_reg;

	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
	lmf_reg = ins->dreg;

	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
		/* Fast path: restore previous LMF directly into the TLS slot. */
		/* Load previous_lmf */
		prev_lmf_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* Set new LMF */
		emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
		/*
		 * Emit IR to pop the LMF:
		 * *(lmf->lmf_addr) = lmf->prev_lmf
		 */
		/* This could be called before emit_push_lmf () */
		if (!cfg->lmf_addr_var)
			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		lmf_addr_reg = cfg->lmf_addr_var->dreg;

		prev_lmf_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/* Emit a profiler enter/leave instrumentation call (func) taking the
 * current method as its single argument. */
emit_instrumentation_call (MonoCompile *cfg, void *func)
	MonoInst *iargs [1];

	/*
	 * Avoid instrumenting inlined methods since it can
	 * distort profiling results.
	 */
	if (cfg->method != cfg->current_method)

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
		EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
		mono_emit_jit_icall (cfg, func, iargs);
/* Map a return type to the matching call opcode, selected by whether the
 * call is indirect (calli), virtual (membase), or direct. */
ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt)
	type = mini_get_underlying_type (type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
		/* integral types (labels partially elided in this listing) */
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
		/* 64-bit integers */
		return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
		/* R4 (when distinct R4 calls are used) */
		return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
		return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
		return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* Enums re-dispatch on their base type. */
			type = mono_class_enum_basetype (type->data.klass);
		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* Re-dispatch on the generic container class. */
		type = &type->data.generic_class->container_class->byval_arg;
	case MONO_TYPE_MVAR:
		/* gsharedvt: treated as vtype calls */
		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
	g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
	MonoType *simple_type;

	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)

	simple_type = mini_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
		/* small ints / I4 (labels elided): must come from I4 or PTR */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
	case MONO_TYPE_FNPTR:
		/*
		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
		 * in native int. (#688008).
		 */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
		/* FIXME: check type compatibility */
		if (arg->type != STACK_I8)
		if (arg->type != cfg->r4_stack_type)
		if (arg->type != STACK_R8)
	case MONO_TYPE_VALUETYPE:
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
			klass = mono_class_from_mono_type (simple_type);
			/* The second cases is needed when doing partial sharing */
			if (klass != arg->klass && mono_class_from_mono_type (target) != arg->klass)
			/* reference-type generic instantiation */
			if (arg->type != STACK_OBJ)
			/* FIXME: check type compatibility */
	case MONO_TYPE_MVAR:
		/* type variables only appear here under generic sharing */
		g_assert (cfg->gshared);
		if (mini_type_var_is_vt (simple_type)) {
			if (arg->type != STACK_VTYPE)
			if (arg->type != STACK_OBJ)
	g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * Prepare arguments for passing to a function call.
 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
 * The type checks are not yet complete and some conversions may need
 * casts on 32 or 64 bit architectures.
 *
 * FIXME: implement this using target_type_is_incompatible ()
 */
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
	MonoType *simple_type;

	/* 'this' argument (enclosing condition elided): must be a reference/pointer. */
	if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)

	for (i = 0; i < sig->param_count; ++i) {
		if (sig->params [i]->byref) {
			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)

		simple_type = mini_get_underlying_type (sig->params [i]);

		switch (simple_type->type) {
		case MONO_TYPE_VOID:
			/* small ints / I4 (labels elided) */
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
		case MONO_TYPE_FNPTR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
		case MONO_TYPE_CLASS:
		case MONO_TYPE_STRING:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			if (args [i]->type != STACK_OBJ)
			if (args [i]->type != STACK_I8)
			if (args [i]->type != cfg->r4_stack_type)
			if (args [i]->type != STACK_R8)
		case MONO_TYPE_VALUETYPE:
			if (simple_type->data.klass->enumtype) {
				/* enums re-dispatch on their base type */
				simple_type = mono_class_enum_basetype (simple_type->data.klass);
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_TYPEDBYREF:
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_GENERICINST:
			/* re-dispatch on the generic container class */
			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
		case MONO_TYPE_MVAR:
			/* gsharedvt type */
			if (args [i]->type != STACK_VTYPE)
			g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALL_MEMBASE opcode to its direct-call counterpart
 * (return statements elided in this listing). */
callvirt_to_call (int opcode)
	case OP_CALL_MEMBASE:
	case OP_VOIDCALL_MEMBASE:
	case OP_FCALL_MEMBASE:
	case OP_RCALL_MEMBASE:
	case OP_VCALL_MEMBASE:
	case OP_LCALL_MEMBASE:
	g_assert_not_reached ();
/* Either METHOD or IMT_ARG needs to be set */
/* Materialize the IMT argument (the target method, or the supplied imt_arg)
 * into a register and attach it to CALL, either via call->imt_arg_reg
 * (LLVM) or via the architecture's dedicated MONO_ARCH_IMT_REG. */
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
	if (COMPILE_LLVM (cfg)) {
		method_reg = alloc_preg (cfg);

		/* Prefer an explicit imt_arg; else method as AOT patch or raw pointer. */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);

		call->imt_arg_reg = method_reg;
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);

	/* Non-LLVM path: same materialization, always via MONO_ARCH_IMT_REG. */
	method_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP
 * pointing at TARGET. */
static MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
	MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
	ji->data.target = target;
/* cfg-aware wrapper around mono_class_check_context_used (). */
mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
		return mono_class_check_context_used (klass);
/* cfg-aware wrapper around mono_method_check_context_used (). */
mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
		return mono_method_check_context_used (method);
2492 * check_method_sharing:
2494 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2497 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2499 gboolean pass_vtable = FALSE;
2500 gboolean pass_mrgctx = FALSE;
/* vtable is needed for static/valuetype methods of generic classes */
2502 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2503 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2504 gboolean sharable = FALSE;
2506 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE))
2510 * Pass vtable iff target method might
2511 * be shared, which means that sharing
2512 * is enabled for its class and its
2513 * context is sharable (and it's not a
2516 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (method_inst != NULL) get an mrgctx instead of a vtable */
2520 if (mini_method_get_context (cmethod) &&
2521 mini_method_get_context (cmethod)->method_inst) {
2522 g_assert (!pass_vtable);
2524 if (mono_method_is_generic_sharable_full (cmethod, TRUE, TRUE, TRUE)) {
2527 if (cfg->gsharedvt && mini_is_gsharedvt_signature (mono_method_signature (cmethod)))
/* both out-params are optional */
2532 if (out_pass_vtable)
2533 *out_pass_vtable = pass_vtable;
2534 if (out_pass_mrgctx)
2535 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Build (but do not add to the bblock) a MonoCallInst for a call with
 *   signature SIG and arguments ARGS.  The boolean-ish flags select the
 *   call flavor: CALLI (indirect), VIRTUAL, TAIL (emits OP_TAILCALL),
 *   RGCTX (an rgctx argument will be attached), UNBOX_TRAMPOLINE.
 *   Handles value-type returns (via cfg->vret_addr or a temp +
 *   OP_OUTARG_VTRETADDR) and the soft-float r8->r4 argument fixup,
 *   then lowers the outgoing args through the LLVM or arch backend.
 */
2538 inline static MonoCallInst *
2539 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2540 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2544 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* tail calls leave the method, so emit the leave-profiler event here */
2549 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2551 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2553 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual));
2556 call->signature = sig;
2557 call->rgctx_reg = rgctx;
2558 sig_ret = mini_get_underlying_type (sig->ret);
2560 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* vtype return, first variant: reuse the caller-provided vret address */
2563 if (mini_type_is_vtype (sig_ret)) {
2564 call->vret_var = cfg->vret_addr;
2565 //g_assert_not_reached ();
/* vtype return, second variant: return into a fresh local temp */
2567 } else if (mini_type_is_vtype (sig_ret)) {
2568 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2571 temp->backend.is_pinvoke = sig->pinvoke;
2574 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2575 * address of return value to increase optimization opportunities.
2576 * Before vtype decomposition, the dreg of the call ins itself represents the
2577 * fact the call modifies the return value. After decomposition, the call will
2578 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2579 * will be transformed into an LDADDR.
2581 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2582 loada->dreg = alloc_preg (cfg);
2583 loada->inst_p0 = temp;
2584 /* We reference the call too since call->dreg could change during optimization */
2585 loada->inst_p1 = call;
2586 MONO_ADD_INS (cfg->cbb, loada);
2588 call->inst.dreg = temp->dreg;
2590 call->vret_var = loada;
/* scalar (non-void) return: allocate a result vreg */
2591 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2592 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2594 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2595 if (COMPILE_SOFT_FLOAT (cfg)) {
2597 * If the call has a float argument, we would need to do an r8->r4 conversion using
2598 * an icall, but that cannot be done during the call sequence since it would clobber
2599 * the call registers + the stack. So we do it before emitting the call.
2601 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2603 MonoInst *in = call->args [i];
/* slot 0 is 'this' when hasthis; map arg index to param index */
2605 if (i >= sig->hasthis)
2606 t = sig->params [i - sig->hasthis];
2608 t = &mono_defaults.int_class->byval_arg;
2609 t = mono_type_get_underlying_type (t);
2611 if (!t->byref && t->type == MONO_TYPE_R4) {
2612 MonoInst *iargs [1];
2616 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2618 /* The result will be in an int vreg */
2619 call->args [i] = conv;
2625 call->need_unbox_trampoline = unbox_trampoline;
2628 if (COMPILE_LLVM (cfg))
2629 mono_llvm_emit_call (cfg, call);
2631 mono_arch_emit_call (cfg, call);
2633 mono_arch_emit_call (cfg, call);
/* reserve outgoing-parameter stack space for the largest call */
2636 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2637 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument (in RGCTX_REG) to CALL.  On architectures
 *   with a dedicated MONO_ARCH_RGCTX_REG the value is bound to that
 *   register; otherwise only call->rgctx_arg_reg is recorded.
 */
2643 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2645 #ifdef MONO_ARCH_RGCTX_REG
2646 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2647 cfg->uses_rgctx_reg = TRUE;
2648 call->rgctx_reg = TRUE;
2650 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and args ARGS.
 *   IMT_ARG and RGCTX_ARG, when non-NULL, are threaded through to the
 *   call.  For pinvoke wrappers with cfg->check_pinvoke_callconv set,
 *   the stack pointer is saved before the call and compared after it,
 *   throwing ExecutionEngineException on a callee-induced imbalance
 *   (wrong calling convention).
 */
2657 inline static MonoInst*
2658 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2663 gboolean check_sp = FALSE;
2665 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2666 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2668 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* copy the rgctx value so it survives until set_rgctx_arg below */
2673 rgctx_reg = mono_alloc_preg (cfg);
2674 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2678 if (!cfg->stack_inbalance_var)
2679 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* snapshot SP before the call */
2681 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2682 ins->dreg = cfg->stack_inbalance_var->dreg;
2683 MONO_ADD_INS (cfg->cbb, ins);
2686 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2688 call->inst.sreg1 = addr->dreg;
2691 emit_imt_argument (cfg, call, NULL, imt_arg);
2693 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* snapshot SP after the call and compare with the saved value */
2698 sp_reg = mono_alloc_preg (cfg);
2700 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2702 MONO_ADD_INS (cfg->cbb, ins);
2704 /* Restore the stack so we don't crash when throwing the exception */
2705 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2706 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2707 MONO_ADD_INS (cfg->cbb, ins);
2709 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2710 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2714 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2716 return (MonoInst*)call;
2720 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2723 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2725 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Central call-emission helper: emit a (possibly virtual, possibly
 *   tail) call to METHOD with signature SIG and arguments ARGS.  THIS
 *   being non-NULL selects virtual dispatch.  Handles, in order:
 *   remoting proxies (unless DISABLE_REMOTING), string ctor signature
 *   fixup, delegate Invoke fast path, devirtualization of non-virtual /
 *   final methods, and IMT- or vtable-slot-based virtual dispatch.
 *   IMT_ARG / RGCTX_ARG, when set, are attached to the call.
 *   Returns the call instruction (added to cfg->cbb).
 */
2728 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2729 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2731 #ifndef DISABLE_REMOTING
2732 gboolean might_be_remote = FALSE;
2734 gboolean virtual = this != NULL;
2735 gboolean enable_for_aot = TRUE;
2739 gboolean need_unbox_trampoline;
2742 sig = mono_method_signature (method);
/* keep the rgctx value alive across call construction */
2745 rgctx_reg = mono_alloc_preg (cfg);
2746 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2749 if (method->string_ctor) {
2750 /* Create the real signature */
2751 /* FIXME: Cache these */
2752 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2753 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2758 context_used = mini_method_check_context_used (cfg, method);
2760 #ifndef DISABLE_REMOTING
/* a proxy can only hide behind a non-virtual call on a MarshalByRef (or object) method */
2761 might_be_remote = this && sig->hasthis &&
2762 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2763 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2765 if (might_be_remote && context_used) {
2768 g_assert (cfg->gshared);
/* shared code cannot use a remoting wrapper: go through an rgctx-resolved indirect call */
2770 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2772 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2776 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2778 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2780 #ifndef DISABLE_REMOTING
2781 if (might_be_remote)
2782 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2785 call->method = method;
2786 call->inst.flags |= MONO_INST_HAS_METHOD;
2787 call->inst.inst_left = this;
2788 call->tail_call = tail;
2791 int vtable_reg, slot_reg, this_reg;
2794 this_reg = this->dreg;
/* fast path: delegate Invoke dispatches through delegate->invoke_impl */
2796 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2797 MonoInst *dummy_use;
2799 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2801 /* Make a call to delegate->invoke_impl */
2802 call->inst.inst_basereg = this_reg;
2803 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2804 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2806 /* We must emit a dummy use here because the delegate trampoline will
2807 replace the 'this' argument with the delegate target making this activation
2808 no longer a root for the delegate.
2809 This is an issue for delegates that target collectible code such as dynamic
2810 methods of GC'able assemblies.
2812 For a test case look into #667921.
2814 FIXME: a dummy use is not the best way to do it as the local register allocator
2815 will put it on a caller save register and spill it around the call.
2816 Ideally, we would either put it on a callee save register or only do the store part.
2818 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2820 return (MonoInst*)call;
/* devirtualize: non-virtual method, or final non-remoting wrapper */
2823 if ((!cfg->compile_aot || enable_for_aot) &&
2824 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2825 (MONO_METHOD_IS_FINAL (method) &&
2826 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2827 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2829 * the method is not virtual, we just need to ensure this is not null
2830 * and then we can call the method directly.
2832 #ifndef DISABLE_REMOTING
2833 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2835 * The check above ensures method is not gshared, this is needed since
2836 * gshared methods can't have wrappers.
2838 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* string ctors take a NULL this, so skip the null check for them */
2842 if (!method->string_ctor)
2843 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2845 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2846 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2848 * the method is virtual, but we can statically dispatch since either
2849 * its class or the method itself are sealed.
2850 * But first we need to ensure it's not a null reference.
2852 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2854 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* true virtual dispatch: load the vtable (faulting on NULL this) */
2856 vtable_reg = alloc_preg (cfg);
2857 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2858 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* interface call: IMT slots sit at negative offsets before the vtable */
2859 guint32 imt_slot = mono_method_get_imt_slot (method);
2860 emit_imt_argument (cfg, call, call->method, imt_arg);
2861 slot_reg = vtable_reg;
2862 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2864 slot_reg = vtable_reg;
2865 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2866 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
/* generic virtual methods also need the IMT argument */
2868 g_assert (mono_method_signature (method)->generic_param_count);
2869 emit_imt_argument (cfg, call, call->method, imt_arg);
2873 call->inst.sreg1 = slot_reg;
2874 call->inst.inst_offset = offset;
2875 call->virtual = TRUE;
2879 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2882 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2884 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a plain (non-tail, no IMT/rgctx arg) call
 *   to METHOD using its own signature.  THIS non-NULL makes it virtual.
 */
2888 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2890 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   Non-virtual, non-tail, no rgctx/unbox handling.
 */
2894 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2901 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2904 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2906 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by its function
 *   address FUNC, going through the icall's exception-handling wrapper.
 */
2910 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2912 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2916 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2920 * mono_emit_abs_call:
2922 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2924 inline static MonoInst*
2925 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2926 MonoMethodSignature *sig, MonoInst **args)
2928 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2932 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* register the patch info so the ABS address can be resolved at patch time */
2935 if (cfg->abs_patches == NULL)
2936 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2937 g_hash_table_insert (cfg->abs_patches, ji, ji);
2938 ins = mono_emit_native_call (cfg, ji, sig, args);
2939 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly, skipping their wrapper.
 *   Disabled under LLVM (amd64 address-range limits), when emitting
 *   sequence points for the debugger, or when explicitly turned off.
 */
2944 direct_icalls_enabled (MonoCompile *cfg)
2946 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2948 if (cfg->compile_llvm)
2951 if (cfg->gen_sdb_seq_points || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO.  When the icall cannot
 *   raise (info->no_raise) and direct icalls are enabled, the wrapper
 *   method is inlined at the call site instead of being called; the
 *   wrapper is created lazily and cached in info->wrapper_method.
 */
2957 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args)
2960 * Call the jit icall without a wrapper if possible.
2961 * The wrapper is needed for the following reasons:
2962 * - to handle exceptions thrown using mono_raise_exceptions () from the
2963 * icall function. The EH code needs the lmf frame pushed by the
2964 * wrapper to be able to unwind back to managed code.
2965 * - to be able to do stack walks for asynchronously suspended
2966 * threads when debugging.
2968 if (info->no_raise && direct_icalls_enabled (cfg)) {
2972 if (!info->wrapper_method) {
2973 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
2974 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* publish the cached wrapper to other threads before use */
2976 mono_memory_barrier ();
2980 * Inline the wrapper method, which is basically a call to the C icall, and
2981 * an exception check.
2983 costs = inline_method (cfg, info->wrapper_method, NULL,
2984 args, NULL, cfg->real_offset, TRUE);
2985 g_assert (costs > 0);
2986 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
/* fallback: call through the wrapper */
2990 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend the result INS of a call with signature FSIG when
 *   the callee (native code, or LLVM-compiled code) may return a
 *   sub-register-sized integer without defined upper bits.  Returns the
 *   widened instruction (or, presumably, INS unchanged when no widening
 *   applies — the tail of the function is not visible here).
 */
2995 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2997 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2998 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3002 * Native code might return non register sized integers
3003 * without initializing the upper bits.
3005 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3006 case OP_LOADI1_MEMBASE:
3007 widen_op = OP_ICONV_TO_I1;
3009 case OP_LOADU1_MEMBASE:
3010 widen_op = OP_ICONV_TO_U1;
3012 case OP_LOADI2_MEMBASE:
3013 widen_op = OP_ICONV_TO_I2;
3015 case OP_LOADU2_MEMBASE:
3016 widen_op = OP_ICONV_TO_U2;
3022 if (widen_op != -1) {
3023 int dreg = alloc_preg (cfg);
3026 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3027 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and lazily cache) the managed String.memcpy helper from
 *   corlib.  Aborts if the running corlib is too old to provide it.
 */
3037 get_memcpy_method (void)
3039 static MonoMethod *memcpy_method = NULL;
3040 if (!memcpy_method) {
3041 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3043 g_error ("Old corlib found. Install a new one");
3045 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively build in *WB_BITMAP a bitmap with one bit per
 *   pointer-sized slot of KLASS (starting at byte OFFSET) marking the
 *   slots that hold GC references and therefore need a write barrier.
 *   Static fields are skipped; nested value types with references
 *   recurse with an adjusted offset.
 */
3049 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3051 MonoClassField *field;
3052 gpointer iter = NULL;
3054 while ((field = mono_class_get_fields (klass, &iter))) {
3057 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* valuetype field offsets include the (absent) MonoObject header: strip it */
3059 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3060 if (mini_type_is_reference (mono_field_get_type (field))) {
3061 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3062 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3064 MonoClass *field_class = mono_class_from_mono_type (field->type);
3065 if (field_class->has_references)
3066 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR.
 *   Strategy, in order of preference: a single OP_CARD_TABLE_WBARRIER
 *   instruction when the arch supports it (JIT, non-AOT, no nursery
 *   check needed); an inline card-table mark (shift, optional mask,
 *   add card-table base, store 1 byte); otherwise a call to the
 *   generic write-barrier method.  No-op unless cfg->gen_write_barriers.
 */
3072 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3074 int card_table_shift_bits;
3075 gpointer card_table_mask;
3077 MonoInst *dummy_use;
3078 int nursery_shift_bits;
3079 size_t nursery_size;
3080 gboolean has_card_table_wb = FALSE;
3082 if (!cfg->gen_write_barriers)
3085 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3087 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3089 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3090 has_card_table_wb = TRUE;
3093 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3096 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3097 wbarrier->sreg1 = ptr->dreg;
3098 wbarrier->sreg2 = value->dreg;
3099 MONO_ADD_INS (cfg->cbb, wbarrier);
3100 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3101 int offset_reg = alloc_preg (cfg);
3102 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked */
3105 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3106 if (card_table_mask)
3107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3109 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3110 * IMM's larger than 32bits.
3112 if (cfg->compile_aot) {
3113 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3115 MONO_INST_NEW (cfg, ins, OP_PCONST);
3116 ins->inst_p0 = card_table;
3117 ins->dreg = card_reg;
3118 MONO_ADD_INS (cfg->cbb, ins);
/* mark the card: *(card_table + index) = 1 */
3121 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3122 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3124 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3125 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* keep VALUE alive across the barrier for the register allocator */
3128 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a valuetype of KLASS
 *   from iargs[1] to iargs[0] (SIZE bytes, alignment ALIGN).  Pointer-
 *   sized slots are copied one by one so that slots flagged in the
 *   write-barrier bitmap get a barrier; the sub-pointer remainder is
 *   copied with 4/2/1-byte moves.  Falls back (visible fallback: a call
 *   to mono_gc_wbarrier_value_copy_bitmap) when the copy is too large
 *   to unroll; bails out entirely for unsupported alignment/size.
 */
3132 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3134 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3135 unsigned need_wb = 0;
3140 /*types with references can't have alignment smaller than sizeof(void*) */
3141 if (align < SIZEOF_VOID_P)
3144 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3145 if (size > 32 * SIZEOF_VOID_P)
3148 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3150 /* We don't unroll more than 5 stores to avoid code bloat. */
3151 if (size > 5 * SIZEOF_VOID_P) {
3152 /*This is harmless and simplifies mono_gc_wbarrier_value_copy_bitmap */
3153 size += (SIZEOF_VOID_P - 1);
3154 size &= ~(SIZEOF_VOID_P - 1);
3156 EMIT_NEW_ICONST (cfg, iargs [2], size);
3157 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3158 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3162 destreg = iargs [0]->dreg;
3163 srcreg = iargs [1]->dreg;
3166 dest_ptr_reg = alloc_preg (cfg);
3167 tmp_reg = alloc_preg (cfg);
/* running destination cursor, advanced a word at a time */
3170 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3172 while (size >= SIZEOF_VOID_P) {
3173 MonoInst *load_inst;
3174 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3175 load_inst->dreg = tmp_reg;
3176 load_inst->inst_basereg = srcreg;
3177 load_inst->inst_offset = offset;
3178 MONO_ADD_INS (cfg->cbb, load_inst);
3180 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* barrier only for slots the bitmap flagged as references */
3183 emit_write_barrier (cfg, iargs [0], load_inst);
3185 offset += SIZEOF_VOID_P;
3186 size -= SIZEOF_VOID_P;
3189 /*tmp += sizeof (void*)*/
3190 if (size >= SIZEOF_VOID_P) {
3191 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3192 MONO_ADD_INS (cfg->cbb, iargs [0]);
3196 /* Those cannot be references since size < sizeof (void*) */
3198 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3199 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3205 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3206 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3212 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3213 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3222 * Emit code to copy a valuetype of type @klass whose address is stored in
3223 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Strategy: gsharedvt classes resolve size/memcpy via rgctx; classes
 * with references (and a heap destination) go through the GC-aware
 * value-copy icalls; small plain structs are copied inline; everything
 * else calls the managed memcpy helper.  NATIVE selects native layout
 * sizing (no references expected in that case).
 */
3226 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3228 MonoInst *iargs [4];
3231 MonoMethod *memcpy_method;
3232 MonoInst *size_ins = NULL;
3233 MonoInst *memcpy_ins = NULL;
3237 klass = mono_class_from_mono_type (mini_get_underlying_type (&klass->byval_arg));
3240 * This check breaks with spilled vars... need to handle it during verification anyway.
3241 * g_assert (klass && klass == src->klass && klass == dest->klass);
3244 if (mini_is_gsharedvt_klass (klass)) {
/* size/memcpy are only known at runtime for gsharedvt: fetch from rgctx */
3246 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3247 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3251 n = mono_class_native_size (klass, &align);
3253 n = mono_class_value_size (klass, &align);
3255 /* if native is true there should be no references in the struct */
3256 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3257 /* Avoid barriers when storing to the stack */
3258 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3259 (dest->opcode == OP_LDADDR))) {
3265 context_used = mini_class_check_context_used (cfg, klass);
3267 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3268 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3270 } else if (context_used) {
3271 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3273 if (cfg->compile_aot) {
3274 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3276 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3277 mono_class_compute_gc_descriptor (klass);
3282 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3284 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* no-barrier path: inline copy for small fixed-size structs */
3289 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3290 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3291 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3296 iargs [2] = size_ins;
3298 EMIT_NEW_ICONST (cfg, iargs [2], n);
3300 memcpy_method = get_memcpy_method ();
3302 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3304 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) the managed String.memset helper from
 *   corlib.  Aborts if the running corlib is too old to provide it.
 */
3309 get_memset_method (void)
3311 static MonoMethod *memset_method = NULL;
3312 if (!memset_method) {
3313 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3315 g_error ("Old corlib found. Install a new one");
3317 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of KLASS at address
 *   DEST->dreg.  gsharedvt classes zero via a runtime-resolved bzero
 *   (size from rgctx); small structs are zeroed inline; larger ones
 *   call the managed memset helper with value 0.
 */
3321 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3323 MonoInst *iargs [3];
3326 MonoMethod *memset_method;
3327 MonoInst *size_ins = NULL;
3328 MonoInst *bzero_ins = NULL;
3329 static MonoMethod *bzero_method;
3331 /* FIXME: Optimize this for the case when dest is an LDADDR */
3332 mono_class_init (klass);
3333 if (mini_is_gsharedvt_klass (klass)) {
3334 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3335 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3337 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3338 g_assert (bzero_method);
3340 iargs [1] = size_ins;
3341 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3345 n = mono_class_value_size (klass, &align);
/* small structs: inline memset */
3347 if (n <= sizeof (gpointer) * 8) {
3348 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3351 memset_method = get_memset_method ();
3353 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3354 EMIT_NEW_ICONST (cfg, iargs [2], n);
3355 mono_emit_method_call (cfg, memset_method, iargs, NULL);
3362 * Emit IR to return either the this pointer for instance method,
3363 * or the mrgctx for static methods.
/*
 * The runtime generic context is reached differently per method kind:
 * generic methods load the mrgctx from the vtable var; static/valuetype
 * methods load the vtable var directly (extracting the class vtable
 * from the mrgctx when the method is also generic); plain instance
 * methods load 'this' and read its object vtable.
 */
3366 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3368 MonoInst *this = NULL;
3370 g_assert (cfg->gshared);
3372 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3373 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3374 !method->klass->valuetype)
3375 EMIT_NEW_ARGLOAD (cfg, this, 0);
3377 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3378 MonoInst *mrgctx_loc, *mrgctx_var;
3381 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3383 mrgctx_loc = mono_get_vtable_var (cfg);
3384 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3387 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3388 MonoInst *vtable_loc, *vtable_var;
3392 vtable_loc = mono_get_vtable_var (cfg);
3393 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3395 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3396 MonoInst *mrgctx_var = vtable_var;
/* the var holds an mrgctx: dereference its class_vtable field */
3399 vtable_reg = alloc_preg (cfg);
3400 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3401 vtable_var->type = STACK_PTR;
/* instance method: rgctx is reached through this->vtable */
3409 vtable_reg = alloc_preg (cfg);
3410 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate a MonoJumpInfoRgctxEntry from MP describing an rgctx slot
 *   for METHOD: IN_MRGCTX selects the method rgctx over the class one,
 *   and the embedded MonoJumpInfo (PATCH_TYPE/PATCH_DATA) plus
 *   INFO_TYPE identify what the slot resolves to.  Mempool-owned.
 */
3415 static MonoJumpInfoRgctxEntry *
3416 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3418 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3419 res->method = method;
3420 res->in_mrgctx = in_mrgctx;
3421 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3422 res->data->type = patch_type;
3423 res->data->data.target = patch_data;
3424 res->info_type = info_type;
3432 * Emit IR to load the value of the rgctx entry ENTRY from the rgctx
/*
 * Two strategies live here.  The inline version (guarded by the FIXME
 * below, not currently used) walks the rgctx slot arrays by depth,
 * null-checking each level, and falls back to the mono_fill_*_rgctx
 * icalls when the slot is still empty.  The active code path at the
 * bottom simply calls the lazy-fetch trampoline via an ABS patch.
 */
3435 static inline MonoInst*
3436 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3438 /* Inline version, not currently used */
3439 // FIXME: This can be called from mono_decompose_vtype_opts (), which can't create new bblocks
3441 int i, slot, depth, index, rgctx_reg, val_reg, res_reg;
3443 MonoBasicBlock *is_null_bb, *end_bb;
3444 MonoInst *res, *ins, *call;
3447 slot = mini_get_rgctx_entry_slot (entry);
3449 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
3450 index = MONO_RGCTX_SLOT_INDEX (slot);
/* mrgctx slots start after the fixed MonoMethodRuntimeGenericContext header */
3452 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* find how many array levels must be traversed to reach INDEX */
3453 for (depth = 0; ; ++depth) {
3454 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
3456 if (index < size - 1)
3461 NEW_BBLOCK (cfg, end_bb);
3462 NEW_BBLOCK (cfg, is_null_bb);
3465 rgctx_reg = rgctx->dreg;
3467 rgctx_reg = alloc_preg (cfg);
3469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, rgctx_reg, rgctx->dreg, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
3470 // FIXME: Avoid this check by allocating the table when the vtable is created etc.
3471 NEW_BBLOCK (cfg, is_null_bb);
3473 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3474 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3477 for (i = 0; i < depth; ++i) {
3478 int array_reg = alloc_preg (cfg);
3480 /* load ptr to next array */
3481 if (mrgctx && i == 0)
3482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
3484 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, rgctx_reg, 0);
3485 rgctx_reg = array_reg;
3486 /* is the ptr null? */
3487 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rgctx_reg, 0);
3488 /* if yes, jump to actual trampoline */
3489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3493 val_reg = alloc_preg (cfg);
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, val_reg, rgctx_reg, (index + 1) * sizeof (gpointer));
3495 /* is the slot null? */
3496 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, val_reg, 0);
3497 /* if yes, jump to actual trampoline */
3498 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* fast path: slot already filled */
3501 res_reg = alloc_preg (cfg);
3502 MONO_INST_NEW (cfg, ins, OP_MOVE);
3503 ins->dreg = res_reg;
3504 ins->sreg1 = val_reg;
3505 MONO_ADD_INS (cfg->cbb, ins);
3507 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* slow path: fill the slot through the runtime */
3510 MONO_START_BB (cfg, is_null_bb);
3512 EMIT_NEW_ICONST (cfg, args [1], index);
3514 call = mono_emit_jit_icall (cfg, mono_fill_method_rgctx, args);
3516 call = mono_emit_jit_icall (cfg, mono_fill_class_rgctx, args);
3517 MONO_INST_NEW (cfg, ins, OP_MOVE);
3518 ins->dreg = res_reg;
3519 ins->sreg1 = call->dreg;
3520 MONO_ADD_INS (cfg->cbb, ins);
3521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3523 MONO_START_BB (cfg, end_bb);
/* active path: resolve via the lazy-fetch trampoline */
3527 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that loads the RGCTX_TYPE property of KLASS from the
 *   current method's runtime generic context.
 */
3532 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3533 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3535 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3536 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3538 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR that loads the RGCTX_TYPE property of signature SIG from
 *   the current method's runtime generic context.
 */
3542 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3543 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3545 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3546 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3548 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR that loads the RGCTX_TYPE property of a gsharedvt call
 *   site (CMETHOD called with signature SIG) from the current method's
 *   runtime generic context.  The (sig, method) pair is wrapped in a
 *   mempool-allocated MonoJumpInfoGSharedVtCall.
 */
3552 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3553 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3555 MonoJumpInfoGSharedVtCall *call_info;
3556 MonoJumpInfoRgctxEntry *entry;
3559 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3560 call_info->sig = sig;
3561 call_info->method = cmethod;
3563 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3564 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3566 return emit_rgctx_fetch (cfg, rgctx, entry);
3570 * emit_get_rgctx_virt_method:
3572 * Return data for method VIRT_METHOD for a receiver of type KLASS.
/* The (klass, virt_method) pair is wrapped in a mempool-allocated
 * MonoJumpInfoVirtMethod and resolved through the rgctx. */
3575 emit_get_rgctx_virt_method (MonoCompile *cfg, int context_used,
3576 MonoClass *klass, MonoMethod *virt_method, MonoRgctxInfoType rgctx_type)
3578 MonoJumpInfoVirtMethod *info;
3579 MonoJumpInfoRgctxEntry *entry;
3582 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoVirtMethod));
3583 info->klass = klass;
3584 info->method = virt_method;
3586 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_VIRT_METHOD, info, rgctx_type);
3587 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3589 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR that loads the gsharedvt info descriptor for CMETHOD
 *   (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO) from the current method's
 *   runtime generic context.
 */
3593 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3594 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3596 MonoJumpInfoRgctxEntry *entry;
3599 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3600 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3602 return emit_rgctx_fetch (cfg, rgctx, entry);
3606 * emit_get_rgctx_method:
3608 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3609 * normal constants, else emit a load from the rgctx.
3612 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3613 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared case: the method is fully known at JIT time, emit a constant */
3615 if (!context_used) {
3618 switch (rgctx_type) {
3619 case MONO_RGCTX_INFO_METHOD:
3620 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3622 case MONO_RGCTX_INFO_METHOD_RGCTX:
3623 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values are not valid in the non-shared path */
3626 g_assert_not_reached ();
/* Shared case: fetch the data from the runtime generic context */
3629 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3630 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3632 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR which loads the property RGCTX_TYPE of FIELD from the runtime
 * generic context.
 */
3637 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3638 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3640 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3641 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3643 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the slot for (DATA, RGCTX_TYPE) in the per-method
 * gsharedvt info template, reusing an existing matching entry when possible
 * and appending a new one otherwise. The entries array is grown on demand.
 */
3647 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3649 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3650 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing entry; LOCAL_OFFSET entries are never shared */
3655 for (i = 0; i < info->num_entries; ++i) {
3656 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3658 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling; first allocation is 16 entries) */
3662 if (info->num_entries == info->count_entries) {
3663 MonoRuntimeGenericContextInfoTemplate *new_entries;
3664 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3666 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
/* Old array is mempool memory, so it is simply abandoned after the copy */
3668 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3669 info->entries = new_entries;
3670 info->count_entries = new_count_entries;
/* Append the new template at the end */
3673 idx = info->num_entries;
3674 template = &info->entries [idx];
3675 template->info_type = rgctx_type;
3676 template->data = data;
3678 info->num_entries ++;
3684 * emit_get_gsharedvt_info:
3686 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3689 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a slot for (data, rgctx_type) in the method's info template */
3694 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3695 /* Load info->entries [idx] */
3696 dreg = alloc_preg (cfg);
3697 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: look up gsharedvt info keyed by a class (via its byval type) */
3703 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3705 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3709 * On return the caller must check @klass for load errors.
/*
 * emit_class_init:
 *
 *   Emit IR which runs the static constructor / class initialization of
 * KLASS if it has not run yet. The vtable is obtained either from the
 * rgctx (shared code) or as a constant, then the init is done either with
 * the arch-specific OP_GENERIC_CLASS_INIT opcode or with an explicit
 * initialized-bit check followed by a call to mono_generic_class_init.
 */
3712 emit_class_init (MonoCompile *cfg, MonoClass *klass)
3714 MonoInst *vtable_arg;
3716 gboolean use_op_generic_class_init = FALSE;
3718 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: load the vtable from the rgctx */
3721 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3722 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared: embed the vtable as a constant (may trigger class load) */
3724 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3728 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3731 #ifdef MONO_ARCH_HAVE_OP_GENERIC_CLASS_INIT
/* LLVM cannot lower this opcode, so fall back to the explicit check below */
3732 if (!COMPILE_LLVM (cfg))
3733 use_op_generic_class_init = TRUE;
3736 if (use_op_generic_class_init) {
3740 * Using an opcode instead of emitting IR here allows the hiding of the call inside the opcode,
3741 * so this doesn't have to clobber any regs and it doesn't break basic blocks.
3743 MONO_INST_NEW (cfg, ins, OP_GENERIC_CLASS_INIT);
3744 ins->sreg1 = vtable_arg->dreg;
3745 MONO_ADD_INS (cfg->cbb, ins);
/* Explicit fallback: test vtable->initialized and call the icall when clear */
3747 static int byte_offset = -1;
3748 static guint8 bitmask;
3749 int bits_reg, inited_reg;
3750 MonoBasicBlock *inited_bb;
3751 MonoInst *args [16];
/* Locate the 'initialized' bitfield inside MonoVTable once, lazily */
3753 if (byte_offset < 0)
3754 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
3756 bits_reg = alloc_ireg (cfg);
3757 inited_reg = alloc_ireg (cfg);
3759 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, bits_reg, vtable_arg->dreg, byte_offset);
3760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, inited_reg, bits_reg, bitmask);
3762 NEW_BBLOCK (cfg, inited_bb);
/* Already initialized -> skip the icall */
3764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, inited_reg, 0);
3765 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBNE_UN, inited_bb);
3767 args [0] = vtable_arg;
3768 mono_emit_jit_icall (cfg, mono_generic_class_init, args);
3770 MONO_START_BB (cfg, inited_bb);
/*
 * emit_seq_point:
 *
 *   Emit a sequence point (debugger stop location) at IP if sequence point
 * generation is enabled and METHOD is the method being compiled (i.e. not
 * inlined code). INTR_LOC marks the point as an interruption location;
 * NONEMPTY_STACK flags that the IL stack is not empty at this point.
 */
3775 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3779 if (cfg->gen_seq_points && cfg->method == method) {
3780 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3782 ins->flags |= MONO_INST_NONEMPTY_STACK;
3783 MONO_ADD_INS (cfg->cbb, ins);
3788 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check)
3790 if (mini_get_debug_options ()->better_cast_details) {
3791 int vtable_reg = alloc_preg (cfg);
3792 int klass_reg = alloc_preg (cfg);
3793 MonoBasicBlock *is_null_bb = NULL;
3795 int to_klass_reg, context_used;
3798 NEW_BBLOCK (cfg, is_null_bb);
3800 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3801 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3804 tls_get = mono_get_jit_tls_intrinsic (cfg);
3806 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3810 MONO_ADD_INS (cfg->cbb, tls_get);
3811 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3812 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3816 context_used = mini_class_check_context_used (cfg, klass);
3818 MonoInst *class_ins;
3820 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3821 to_klass_reg = class_ins->dreg;
3823 to_klass_reg = alloc_preg (cfg);
3824 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3826 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3829 MONO_START_BB (cfg, is_null_bb);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the recorded cast details in
 * the JIT TLS data after the checked cast succeeded. No-op unless the
 * --debug=casts option is active.
 */
3834 reset_cast_details (MonoCompile *cfg)
3836 /* Reset the variables holding the cast details */
3837 if (mini_get_debug_options ()->better_cast_details) {
3838 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3840 MONO_ADD_INS (cfg->cbb, tls_get);
3841 /* It is enough to reset the from field */
3842 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3847 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which verifies that OBJ is exactly an instance of ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for array covariance
 * checks on stelem etc.). The comparison strategy depends on the
 * compilation mode: class pointers under MONO_OPT_SHARED, rgctx-loaded
 * vtable in shared generic code, vtable constants otherwise.
 */
3850 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3852 int vtable_reg = alloc_preg (cfg);
3855 context_used = mini_class_check_context_used (cfg, array_class);
3857 save_cast_details (cfg, array_class, obj->dreg, FALSE);
/* Faulting load: also acts as the null check on obj */
3859 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared (appdomain-independent) code: compare MonoClass pointers */
3861 if (cfg->opt & MONO_OPT_SHARED) {
3862 int class_reg = alloc_preg (cfg);
3863 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3864 if (cfg->compile_aot) {
3865 int klass_reg = alloc_preg (cfg);
3866 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3867 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3869 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: the expected vtable comes from the rgctx */
3871 } else if (context_used) {
3872 MonoInst *vtable_ins;
3874 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3875 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Normal code: compare against the vtable of array_class directly */
3877 if (cfg->compile_aot) {
3881 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3883 vt_reg = alloc_preg (cfg);
3884 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3885 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3888 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3890 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3894 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3896 reset_cast_details (cfg);
3900 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3901 * generic code is generated.
3904 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unboxing is delegated to the managed Nullable<T>.Unbox helper */
3906 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3909 MonoInst *rgctx, *addr;
3911 /* FIXME: What if the class is shared? We might not
3912 have to get the address of the method from the
/* Shared code: call Unbox indirectly through an rgctx-loaded code address */
3914 addr = emit_get_rgctx_method (cfg, context_used, method,
3915 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3917 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3919 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared code: direct call, possibly passing the vtable as an extra arg */
3921 gboolean pass_vtable, pass_mrgctx;
3922 MonoInst *rgctx_arg = NULL;
3924 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3925 g_assert (!pass_mrgctx);
3928 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3931 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3934 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for the 'unbox' opcode: type-check the boxed object on top of
 * the stack (SP [0]) against KLASS, then return an instruction computing
 * the address of the value inside the box (obj + sizeof (MonoObject)).
 * Throws InvalidCastException at run time on type mismatch.
 */
3939 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3943 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3944 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3945 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3946 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3948 obj_reg = sp [0]->dreg;
/* Faulting load: doubles as the null check */
3949 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3950 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3952 /* FIXME: generics */
3953 g_assert (klass->rank == 0);
/* A boxed valuetype can never be an array, so rank must be 0 */
3956 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3957 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3959 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3960 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare element classes using the rgctx-loaded class */
3963 MonoInst *element_class;
3965 /* This assertion is from the unboxcast insn */
3966 g_assert (klass->rank == 0);
3968 element_class = emit_get_rgctx_klass (cfg, context_used,
3969 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3971 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3972 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared code: check against the JIT-time element class */
3974 save_cast_details (cfg, klass->element_class, obj_reg, FALSE);
3975 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3976 reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header, i.e. the unboxed data */
3979 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3980 MONO_ADD_INS (cfg->cbb, add);
3981 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR which unboxes OBJ when KLASS is a gsharedvt type whose concrete
 * kind (valuetype / reference type / nullable) is only known at run time.
 * Branches on the runtime CLASS_BOX_TYPE code (1 appears to select the
 * reference-type path, 2 the nullable path -- matches the target block
 * names) and returns a load of the unboxed value through addr_reg.
 */
3988 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj)
3990 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3991 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3995 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Run the castclass check first (as an icall) */
4001 args [1] = klass_inst;
4004 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
4006 NEW_BBLOCK (cfg, is_ref_bb);
4007 NEW_BBLOCK (cfg, is_nullable_bb);
4008 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box-type code of klass */
4009 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4010 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4011 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4013 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4014 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
4016 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
4017 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Valuetype fall-through path: address is obj + sizeof (MonoObject) */
4021 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
4022 MONO_ADD_INS (cfg->cbb, addr);
4024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4027 MONO_START_BB (cfg, is_ref_bb);
4029 /* Save the ref to a temporary */
4030 dreg = alloc_ireg (cfg);
4031 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
4032 addr->dreg = addr_reg;
4033 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
4034 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4037 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the Nullable unbox helper through an rgctx-loaded address */
4040 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
4041 MonoInst *unbox_call;
4042 MonoMethodSignature *unbox_sig;
/* Hand-built object -> T signature, since the method cannot be constructed at JIT time */
4044 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4045 unbox_sig->ret = &klass->byval_arg;
4046 unbox_sig->param_count = 1;
4047 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
4048 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
4050 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
4051 addr->dreg = addr_reg;
4054 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4057 MONO_START_BB (cfg, end_bb);
/* All paths join here; load the value through the computed address */
4060 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
4066 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR which allocates an instance of KLASS. FOR_BOX indicates the
 * allocation is for boxing a valuetype. Picks between: managed allocators
 * (inlined GC fast path), domain-shared mono_object_new, an mscorlib-only
 * AOT helper, and mono_object_new_specific / class-specific allocation
 * functions. Returns the call/alloc instruction, or NULL with the cfg
 * exception set (e.g. TYPE_LOAD on vtable failure).
 */
4069 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
4071 MonoInst *iargs [2];
/* Shared generic code path */
4077 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-only instance size */
4078 gboolean known_instance_size = !mini_is_gsharedvt_klass (klass);
4080 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* MONO_OPT_SHARED needs the MonoClass; otherwise the vtable is fetched from the rgctx */
4082 if (cfg->opt & MONO_OPT_SHARED)
4083 rgctx_info = MONO_RGCTX_INFO_KLASS;
4085 rgctx_info = MONO_RGCTX_INFO_VTABLE;
4086 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
4088 if (cfg->opt & MONO_OPT_SHARED) {
4089 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4091 alloc_ftn = mono_object_new;
4094 alloc_ftn = mono_object_new_specific;
/* Prefer the inlined managed allocator when available and not domain-shared */
4097 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
4098 if (known_instance_size) {
4099 int size = mono_class_instance_size (klass);
/* An instance can never be smaller than the object header */
4100 if (size < sizeof (MonoObject))
4101 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4103 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4105 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4108 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared code paths below */
4111 if (cfg->opt & MONO_OPT_SHARED) {
4112 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4113 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4115 alloc_ftn = mono_object_new;
4116 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4117 /* This happens often in argument checking code, eg. throw new FooException... */
4118 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4119 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4120 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4122 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4123 MonoMethod *managed_alloc = NULL;
/* vtable creation failed -> report a type load error to the caller */
4127 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4128 cfg->exception_ptr = klass;
4132 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4134 if (managed_alloc) {
4135 int size = mono_class_instance_size (klass);
4136 if (size < sizeof (MonoObject))
4137 g_error ("Invalid size %d for class %s", size, mono_type_get_full_name (klass));
4139 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4140 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4141 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4143 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in gpointer words ('lw') */
4145 guint32 lw = vtable->klass->instance_size;
4146 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4147 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4148 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4151 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4155 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4159 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR which boxes VAL of type KLASS. Handles three shapes:
 * Nullable<T> (delegated to the managed Nullable<T>.Box helper),
 * gsharedvt classes (runtime dispatch on the box-type code, mirroring
 * handle_unbox_gsharedvt), and the plain case (allocate + store the
 * value past the object header).
 */
4162 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
4164 MonoInst *alloc, *ins;
4166 if (mono_class_is_nullable (klass)) {
4167 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4170 /* FIXME: What if the class is shared? We might not
4171 have to get the method address from the RGCTX. */
/* Shared code: indirect call through an rgctx-loaded code address */
4172 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4173 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4174 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4176 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4178 gboolean pass_vtable, pass_mrgctx;
4179 MonoInst *rgctx_arg = NULL;
4181 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4182 g_assert (!pass_mrgctx);
4185 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4188 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4191 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the concrete kind of klass is only known at run time */
4195 if (mini_is_gsharedvt_klass (klass)) {
4196 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4197 MonoInst *res, *is_ref, *src_var, *addr;
4200 dreg = alloc_ireg (cfg);
4202 NEW_BBLOCK (cfg, is_ref_bb);
4203 NEW_BBLOCK (cfg, is_nullable_bb);
4204 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box-type code (1 -> ref path, 2 -> nullable path) */
4205 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4206 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4207 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4209 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4210 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Valuetype fall-through path: allocate and store the value into the box */
4213 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4216 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4217 ins->opcode = OP_STOREV_MEMBASE;
4219 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4220 res->type = STACK_OBJ;
4222 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4225 MONO_START_BB (cfg, is_ref_bb);
4227 /* val is a vtype, so has to load the value manually */
4228 src_var = get_vreg_to_inst (cfg, val->dreg);
4230 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4231 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4232 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4233 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4236 MONO_START_BB (cfg, is_nullable_bb);
4239 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4240 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4242 MonoMethodSignature *box_sig;
4245 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4246 * construct that method at JIT time, so have to do things by hand.
4248 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4249 box_sig->ret = &mono_defaults.object_class->byval_arg;
4250 box_sig->param_count = 1;
4251 box_sig->params [0] = &klass->byval_arg;
4252 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4253 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4254 res->type = STACK_OBJ;
4258 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4260 MONO_START_BB (cfg, end_bb);
/* Plain (non-gsharedvt) case: allocate and copy the value into the box */
4264 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4268 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance/definition with at least one
 * covariant or contravariant type parameter instantiated with (or usable
 * as) a reference type. Such casts need the full variance-aware cast
 * machinery instead of a simple class-pointer comparison.
 */
4274 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4277 MonoGenericContainer *container;
4278 MonoGenericInst *ginst;
/* Inflated generic instance: inspect its actual type arguments */
4280 if (klass->generic_class) {
4281 container = klass->generic_class->container_class->generic_container;
4282 ginst = klass->generic_class->context.class_inst;
/* Open generic definition in shared code: inspect the container's own arguments */
4283 } else if (klass->generic_container && context_used) {
4284 container = klass->generic_container;
4285 ginst = container->context.class_inst;
4290 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters matter for variance-aware casts */
4292 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4294 type = ginst->type_argv [i];
4295 if (mini_type_is_reference (type))
/* Lazily-built whitelist of corlib class names whose icalls may be called directly */
4301 static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's icall can be invoked with a direct call
 * instead of going through a wrapper. Only whitelisted corlib types
 * (Math, Decimal, Number, Buffer) qualify, since a directly-called icall
 * must not raise exceptions (see the comment below).
 */
4304 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4306 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4307 if (!direct_icalls_enabled (cfg))
4311 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4312 * Whitelist a few icalls for now.
4314 if (!direct_icall_type_hash) {
4315 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4317 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4318 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4319 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
/* Publish the fully-built table before the global pointer becomes visible */
4320 mono_memory_barrier ();
4321 direct_icall_type_hash = h;
4324 if (cmethod->klass == mono_defaults.math_class)
4326 /* No locking needed */
4327 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
/* True when an isinst/castclass against KLASS cannot be done with a simple
 * class-pointer comparison: interfaces, arrays, nullables, MarshalByRef
 * (remoting proxies), sealed classes and open type variables all need the
 * slower, cache-based path. */
4332 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper for KLASS,
 * recording cast details around the call for --debug=casts diagnostics.
 * ARGS are the wrapper's arguments (object, klass, cache slot).
 */
4335 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args)
4337 MonoMethod *mono_castclass;
4340 mono_castclass = mono_marshal_get_castclass_with_cache ();
4342 save_cast_details (cfg, klass, args [0]->dreg, TRUE);
4343 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4344 reset_cast_details (cfg);
/*
 * get_castclass_cache_idx:
 *
 *   Allocate a fresh per-call-site index for a CASTCLASS_CACHE patch,
 * combining the method index (high 16 bits) with a per-cfg counter.
 */
4350 get_castclass_cache_idx (MonoCompile *cfg)
4352 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4353 cfg->castclass_cache_index ++;
4354 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code variant of the cache-based castclass: KLASS is known at
 * JIT time, so the class argument is a constant and the cache slot is
 * either an AOT patch or a domain-allocated pointer-sized slot.
 */
4358 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass)
4367 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
/* args [2] = cache slot address: AOT patch under AOT, fresh domain memory otherwise */
4370 if (cfg->compile_aot) {
4371 idx = get_castclass_cache_idx (cfg);
4372 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4374 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4377 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4378 return emit_castclass_with_cache (cfg, klass, args)
4382 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the 'castclass' opcode: cast SRC to KLASS, throwing
 * InvalidCastException at run time on mismatch. Chooses between the
 * cache-based wrapper (variant generics / complex classes), the inlined
 * remoting-aware wrapper (MarshalByRef / interfaces in non-shared code),
 * and inline vtable/class-pointer checks for the simple cases.
 * INLINE_COSTS is incremented to account for the emitted code.
 */
4385 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, int *inline_costs)
4387 MonoBasicBlock *is_null_bb;
4388 int obj_reg = src->dreg;
4389 int vtable_reg = alloc_preg (cfg);
4391 MonoInst *klass_inst = NULL, *res;
4393 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic argument in non-shared code -> cache-based wrapper */
4395 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4396 res = emit_castclass_with_cache_nonshared (cfg, src, klass);
4397 (*inline_costs) += 2;
/* MarshalByRef or interface in non-shared code -> inline the castclass wrapper */
4399 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4400 MonoMethod *mono_castclass;
4401 MonoInst *iargs [1];
4404 mono_castclass = mono_marshal_get_castclass (klass);
4407 save_cast_details (cfg, klass, src->dreg, TRUE);
4408 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4409 iargs, ip, cfg->real_offset, TRUE);
4410 reset_cast_details (cfg);
4411 CHECK_CFG_EXCEPTION;
4412 g_assert (costs > 0);
4414 cfg->real_offset += 5;
4416 (*inline_costs) += costs;
/* Shared code with a complex klass -> cache-based wrapper with rgctx-loaded cache */
4424 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4425 MonoInst *cache_ins;
4427 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4432 /* klass - it's the second element of the cache entry*/
4433 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4436 args [2] = cache_ins;
4438 return emit_castclass_with_cache (cfg, klass, args);
4441 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check: null always passes a castclass */
4444 NEW_BBLOCK (cfg, is_null_bb);
4446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4447 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4449 save_cast_details (cfg, klass, obj_reg, FALSE);
4451 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4453 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4455 int klass_reg = alloc_preg (cfg);
4457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: an exact class-pointer compare suffices (no subclass walk) */
4459 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4460 /* the remoting code is broken, access the class for now */
4461 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4462 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4464 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4465 cfg->exception_ptr = klass;
4468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4473 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: full class-hierarchy cast check */
4475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4476 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4480 MONO_START_BB (cfg, is_null_bb);
4482 reset_cast_details (cfg);
4491 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the 'isinst' opcode: test whether SRC is an instance of
 * KLASS and produce either the (unchanged) object reference or NULL.
 * Complex classes go through the isinst-with-cache wrapper; the rest are
 * handled inline with a three-block scheme: false_bb stores NULL,
 * is_null_bb keeps the input (set by the up-front OP_MOVE), and both fall
 * into end_bb.
 */
4494 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4497 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4498 int obj_reg = src->dreg;
4499 int vtable_reg = alloc_preg (cfg);
4500 int res_reg = alloc_ireg_ref (cfg);
4501 MonoInst *klass_inst = NULL;
/* Complex klass -> call the cache-based isinst wrapper instead of inlining */
4506 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4507 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4508 MonoInst *cache_ins;
4510 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4515 /* klass - it's the second element of the cache entry*/
4516 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4519 args [2] = cache_ins;
4521 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4524 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4527 NEW_BBLOCK (cfg, is_null_bb);
4528 NEW_BBLOCK (cfg, false_bb);
4529 NEW_BBLOCK (cfg, end_bb);
4531 /* Do the assignment at the beginning, so the other assignment can be if converted */
4532 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4533 ins->type = STACK_OBJ;
/* null input -> result is null (the up-front move already copied it) */
4536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4541 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4542 g_assert (!context_used);
4543 /* the is_null_bb target simply copies the input register to the output */
4544 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4546 int klass_reg = alloc_preg (cfg);
/* Array klass: check the rank, then the element class */
4549 int rank_reg = alloc_preg (cfg);
4550 int eclass_reg = alloc_preg (cfg);
4552 g_assert (!context_used);
4553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4556 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special element-class cases (object / enum base types) follow array variance rules */
4558 if (klass->cast_class == mono_defaults.object_class) {
4559 int parent_reg = alloc_preg (cfg);
4560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4561 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4562 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4564 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4565 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4566 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4567 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4568 } else if (klass->cast_class == mono_defaults.enum_class) {
4569 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4571 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4572 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4574 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4575 /* Check that the object is a vector too */
4576 int bounds_reg = alloc_preg (cfg);
4577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4579 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4582 /* the is_null_bb target simply copies the input register to the output */
4583 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4585 } else if (mono_class_is_nullable (klass)) {
4586 g_assert (!context_used);
4587 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4588 /* the is_null_bb target simply copies the input register to the output */
4589 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class in non-shared code: exact class-pointer compare */
4591 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4592 g_assert (!context_used);
4593 /* the remoting code is broken, access the class for now */
4594 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4595 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4597 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4598 cfg->exception_ptr = klass;
4601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4607 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: subclass walk via isninst_cast_inst */
4609 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4610 /* the is_null_bb target simply copies the input register to the output */
4611 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: the result is NULL */
4616 MONO_START_BB (cfg, false_bb);
4618 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4621 MONO_START_BB (cfg, is_null_bb);
4623 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal "cisinst" opcode: test whether the object in
 * SRC is an instance of KLASS, with extra paths for transparent proxies
 * when remoting support is compiled in (DISABLE_REMOTING not defined).
 * Produces an OP_ICONST-shaped instruction of type STACK_I4 holding the
 * result code documented below.
 * NOTE(review): this excerpt elides some original lines; comments describe
 * only the code visible here.
 */
4629 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4631 /* This opcode takes as input an object reference and a class, and returns:
4632 0) if the object is an instance of the class,
4633 1) if the object is not instance of the class,
4634 2) if the object is a proxy whose type cannot be determined */
/* Remoting builds need additional basic blocks for the proxy paths. */
4637 #ifndef DISABLE_REMOTING
4638 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4640 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4642 int obj_reg = src->dreg;
4643 int dreg = alloc_ireg (cfg);
4645 #ifndef DISABLE_REMOTING
4646 int klass_reg = alloc_preg (cfg);
4649 NEW_BBLOCK (cfg, true_bb);
4650 NEW_BBLOCK (cfg, false_bb);
4651 NEW_BBLOCK (cfg, end_bb);
4652 #ifndef DISABLE_REMOTING
4653 NEW_BBLOCK (cfg, false2_bb);
4654 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is routed to the "not an instance" result block. */
4657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4658 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: check the vtable's interface map first. */
4660 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4661 #ifndef DISABLE_REMOTING
4662 NEW_BBLOCK (cfg, interface_fail_bb);
4665 tmp_reg = alloc_preg (cfg);
4666 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4667 #ifndef DISABLE_REMOTING
4668 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4669 MONO_START_BB (cfg, interface_fail_bb);
4670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy -> definitely not an instance of the interface. */
4672 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4674 tmp_reg = alloc_preg (cfg);
4675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy carrying custom type info: result is "undecidable" (2) via false2_bb. */
4676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4679 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4682 #ifndef DISABLE_REMOTING
4683 tmp_reg = alloc_preg (cfg);
4684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4687 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class' proxy_class instead. */
4688 tmp_reg = alloc_preg (cfg);
4689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4692 tmp_reg = alloc_preg (cfg);
4693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4697 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4698 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4700 MONO_START_BB (cfg, no_proxy_bb);
4702 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4704 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false_bb -> 1, false2_bb -> 2, true_bb -> 0. */
4708 MONO_START_BB (cfg, false_bb);
4710 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4711 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4713 #ifndef DISABLE_REMOTING
4714 MONO_START_BB (cfg, false2_bb);
4716 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4720 MONO_START_BB (cfg, true_bb);
4722 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4724 MONO_START_BB (cfg, end_bb);
/* Materialize the merged result as an I4-typed instruction. */
4727 MONO_INST_NEW (cfg, ins, OP_ICONST);
4729 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal "ccastclass" opcode — a castclass that is
 * tolerant of transparent proxies.  Produces an OP_ICONST-shaped I4 result
 * with the codes documented below; for non-proxy failures an
 * InvalidCastException is raised by the emitted checks.
 * NOTE(review): this excerpt elides some original lines; comments cover
 * only the visible code.
 */
4735 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4737 /* This opcode takes as input an object reference and a class, and returns:
4738 0) if the object is an instance of the class,
4739 1) if the object is a proxy whose type cannot be determined
4740 an InvalidCastException exception is thrown otherwise*/
4743 #ifndef DISABLE_REMOTING
4744 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4746 MonoBasicBlock *ok_result_bb;
4748 int obj_reg = src->dreg;
4749 int dreg = alloc_ireg (cfg);
4750 int tmp_reg = alloc_preg (cfg);
4752 #ifndef DISABLE_REMOTING
4753 int klass_reg = alloc_preg (cfg);
4754 NEW_BBLOCK (cfg, end_bb);
4757 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
4759 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4760 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failure can produce a useful exception message. */
4762 save_cast_details (cfg, klass, obj_reg, FALSE);
4764 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4765 #ifndef DISABLE_REMOTING
4766 NEW_BBLOCK (cfg, interface_fail_bb);
4768 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4769 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4770 MONO_START_BB (cfg, interface_fail_bb);
4771 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Interface test failed: only a transparent proxy may still pass. */
4773 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4775 tmp_reg = alloc_preg (cfg);
4776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy without custom type info cannot satisfy the cast -> throw. */
4777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4778 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4780 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4781 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4783 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4784 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4785 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4788 #ifndef DISABLE_REMOTING
4789 NEW_BBLOCK (cfg, no_proxy_bb);
4791 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4793 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: check against the remote class' proxy_class. */
4795 tmp_reg = alloc_preg (cfg);
4796 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4797 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4799 tmp_reg = alloc_preg (cfg);
4800 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4802 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4804 NEW_BBLOCK (cfg, fail_1_bb);
4806 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Undecidable proxy: result 1 rather than an exception. */
4808 MONO_START_BB (cfg, fail_1_bb);
4810 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4811 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4813 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: a normal castclass check (throws on failure). */
4815 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4817 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4821 MONO_START_BB (cfg, ok_result_bb);
4823 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4825 #ifndef DISABLE_REMOTING
4826 MONO_START_BB (cfg, end_bb);
4830 MONO_INST_NEW (cfg, ins, OP_ICONST);
4832 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inline expansion of Enum.HasFlag: load the enum value from
 * ENUM_THIS, AND it with ENUM_FLAG, and compare the result against the
 * flag — i.e. (this & flag) == flag — yielding an I4 boolean (ceq).
 * Uses 32-bit or 64-bit opcodes depending on the underlying enum type
 * (is_i4 is presumably derived from the elided switch arms — confirm).
 * NOTE(review): this excerpt elides some original lines (switch cases,
 * default handling); comments cover only the visible code.
 */
4837 static G_GNUC_UNUSED MonoInst*
4838 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4840 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4841 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4844 switch (enum_type->type) {
4847 #if SIZEOF_REGISTER == 8
4859 MonoInst *load, *and, *cmp, *ceq;
4860 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4861 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4862 int dest_reg = alloc_ireg (cfg);
4864 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4865 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4866 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4867 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4869 ceq->type = STACK_I4;
/* Decompose the composite opcodes for backends that need simpler forms. */
4872 load = mono_decompose_opcode (cfg, load);
4873 and = mono_decompose_opcode (cfg, and);
4874 cmp = mono_decompose_opcode (cfg, cmp);
4875 ceq = mono_decompose_opcode (cfg, ceq);
4883 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined delegate constructor: allocate the delegate object,
 * fill its target/method fields, reserve a per-domain code slot for the
 * compiled target (so mono_delegate_trampoline () can skip the lookup),
 * and install the invoke trampoline.  VIRTUAL selects the virtual-delegate
 * trampoline.  Returns NULL and sets the cfg exception on error (per the
 * preceding comment in this file).
 * NOTE(review): this excerpt elides some original lines; comments cover
 * only the visible code.
 */
4885 static G_GNUC_UNUSED MonoInst*
4886 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4890 gpointer trampoline;
4891 MonoInst *obj, *method_ins, *tramp_ins;
4896 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4899 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4903 obj = handle_alloc (cfg, klass, FALSE, 0);
4907 /* Inline the contents of mono_delegate_ctor */
4909 /* Set target field */
4910 /* Optimize away setting of NULL target */
4911 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4912 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target store writes a managed reference: emit a GC write barrier. */
4913 if (cfg->gen_write_barriers) {
4914 dreg = alloc_preg (cfg);
4915 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4916 emit_write_barrier (cfg, ptr, target);
4920 /* Set method field */
4921 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4922 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4925 * To avoid looking up the compiled code belonging to the target method
4926 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4927 * store it, and we fill it after the method has been compiled.
4929 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4930 MonoInst *code_slot_ins;
4933 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain method -> code-slot mapping. */
4935 domain = mono_domain_get ();
4936 mono_domain_lock (domain);
4937 if (!domain_jit_info (domain)->method_code_hash)
4938 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4939 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4941 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4942 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4944 mono_domain_unlock (domain);
4946 if (cfg->compile_aot)
4947 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4949 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: emit a patchable delegate-trampoline constant; JIT: a direct pointer. */
4954 if (cfg->compile_aot) {
4955 MonoDelegateClassMethodPair *del_tramp;
4957 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4958 del_tramp->klass = klass;
4959 del_tramp->method = context_used ? NULL : method;
4960 del_tramp->virtual = virtual;
4961 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4964 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4966 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4967 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4970 /* Set invoke_impl field */
4972 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4974 dreg = alloc_preg (cfg);
4975 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4976 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4978 dreg = alloc_preg (cfg);
4979 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4980 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4983 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va icall for a RANK-dimensional
 * array allocation, with the dimension arguments in SP.  The call uses a
 * vararg calling convention, which LLVM cannot compile, so LLVM is disabled
 * for this method.
 */
4989 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4991 MonoJitICallInfo *info;
4993 /* Need to register the icall so it gets an icall wrapper */
4994 info = mono_get_array_new_va_icall (rank);
4996 cfg->flags |= MONO_CFG_HAS_VARARGS;
4998 /* mono_array_new_va () needs a vararg calling convention */
4999 cfg->disable_llvm = TRUE;
5001 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
5002 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
5006 * handle_constrained_gsharedvt_call:
5008 * Handle constrained calls where the receiver is a gsharedvt type.
5009 * Return the instruction representing the call. Set the cfg exception on failure.
5012 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
5013 gboolean *ref_emit_widen)
5015 MonoInst *ins = NULL;
5016 gboolean emit_widen = *ref_emit_widen;
5019 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
5020 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
5021 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a restricted set of target classes / signatures is supported; anything
 * else falls through to GSHAREDVT_FAILURE below. */
5023 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
5024 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (fsig->ret)) &&
5025 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (fsig->params [0]))))) {
5026 MonoInst *args [16];
5029 * This case handles calls to
5030 * - object:ToString()/Equals()/GetHashCode(),
5031 * - System.IComparable<T>:CompareTo()
5032 * - System.IEquatable<T>:Equals ()
5033 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = target method (rgctx-resolved when generic context is used). */
5037 if (mono_method_check_context_used (cmethod))
5038 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
5040 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
5041 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
5043 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
5044 if (fsig->hasthis && fsig->param_count) {
5045 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
5046 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
5047 ins->dreg = alloc_preg (cfg);
5048 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
5049 MONO_ADD_INS (cfg->cbb, ins);
/* gsharedvt argument: pass its address plus box-type info in args [3]. */
5052 if (mini_is_gsharedvt_type (fsig->params [0])) {
5055 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
5057 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
5058 addr_reg = ins->dreg;
5059 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
5061 EMIT_NEW_ICONST (cfg, args [3], 0);
5062 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
5065 EMIT_NEW_ICONST (cfg, args [3], 0);
5066 EMIT_NEW_ICONST (cfg, args [4], 0);
/* The icall performs the actual ref/vtype dispatch at runtime. */
5068 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
5071 if (mini_is_gsharedvt_type (fsig->ret)) {
5072 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins);
5073 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* The icall returns a boxed result: load the value from past the header. */
5077 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
5078 MONO_ADD_INS (cfg->cbb, add);
5080 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
5081 MONO_ADD_INS (cfg->cbb, ins);
5082 /* ins represents the call result */
5085 GSHAREDVT_FAILURE (CEE_CALLVIRT);
5088 *ref_emit_widen = emit_widen;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR into the GOT variable at the very start of the
 * entry basic block, once per method (guarded by got_var_allocated), and a
 * dummy use in the exit block to keep the variable live for the whole
 * method.  No-op when the method has no got_var.
 */
5097 mono_emit_load_got_addr (MonoCompile *cfg)
5099 MonoInst *getaddr, *dummy_use;
5101 if (!cfg->got_var || cfg->got_var_allocated)
5104 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5105 getaddr->cil_code = cfg->header->code;
5106 getaddr->dreg = cfg->got_var->dreg;
5108 /* Add it to the start of the first bblock */
5109 if (cfg->bb_entry->code) {
5110 getaddr->next = cfg->bb_entry->code;
5111 cfg->bb_entry->code = getaddr;
5114 MONO_ADD_INS (cfg->bb_entry, getaddr);
5116 cfg->got_var_allocated = TRUE;
5119 * Add a dummy use to keep the got_var alive, since real uses might
5120 * only be generated by the back ends.
5121 * Add it to end_bblock, so the variable's lifetime covers the whole
5123 * It would be better to make the usage of the got var explicit in all
5124 * cases when the backend needs it (i.e. calls, throw etc.), so this
5125 * wouldn't be needed.
5127 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5128 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size threshold (IL bytes); initialized lazily from MONO_INLINELIMIT
 * or INLINE_LENGTH_LIMIT on first use. */
5131 static int inline_limit;
5132 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled:
 * rejects explicitly non-inlinable methods (noinlining, synchronized,
 * marshal-by-ref, too large), bounds the inline depth, and refuses methods
 * whose class would need a cctor run that cannot be arranged.  On
 * soft-float targets, methods with R4 parameters/returns are rejected.
 * NOTE(review): this excerpt elides some original lines (early returns,
 * else arms); comments cover only the visible code.
 */
5135 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5137 MonoMethodHeaderSummary header;
5139 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5140 MonoMethodSignature *sig = mono_method_signature (method);
5144 if (cfg->disable_inline)
5149 if (cfg->inline_depth > 10)
5152 #ifdef MONO_ARCH_HAVE_LMF_OPS
5153 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5154 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5155 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5160 if (!mono_method_get_header_summary (method, &header))
5163 /*runtime, icall and pinvoke are checked by summary call*/
5164 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5165 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5166 (mono_class_is_marshalbyref (method->klass)) ||
5170 /* also consider num_locals? */
5171 /* Do the size check early to avoid creating vtables */
5172 if (!inline_limit_inited) {
5173 if (g_getenv ("MONO_INLINELIMIT"))
5174 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
5176 inline_limit = INLINE_LENGTH_LIMIT;
5177 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
5179 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5183 * if we can initialize the class of the method right away, we do,
5184 * otherwise we don't allow inlining if the class needs initialization,
5185 * since it would mean inserting a call to mono_runtime_class_init()
5186 * inside the inlined code
5188 if (!(cfg->opt & MONO_OPT_SHARED)) {
5189 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5190 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5191 vtable = mono_class_vtable (cfg->domain, method->klass);
5194 if (!cfg->compile_aot)
5195 mono_runtime_class_init (vtable);
5196 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5197 if (cfg->run_cctors && method->klass->has_cctor) {
5198 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
5199 if (!method->klass->runtime_info)
5200 /* No vtable created yet */
5202 vtable = mono_class_vtable (cfg->domain, method->klass);
5205 /* This makes so that inline cannot trigger */
5206 /* .cctors: too many apps depend on them */
5207 /* running with a specific order... */
5208 if (! vtable->initialized)
5210 mono_runtime_class_init (vtable);
5212 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5213 if (!method->klass->runtime_info)
5214 /* No vtable created yet */
5216 vtable = mono_class_vtable (cfg->domain, method->klass);
5219 if (!vtable->initialized)
5224 * If we're compiling for shared code
5225 * the cctor will need to be run at aot method load time, for example,
5226 * or at the end of the compilation of the inlining method.
5228 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5232 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: R4 values need special handling the inliner does not do. */
5233 if (mono_arch_is_soft_float ()) {
5235 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5237 for (i = 0; i < sig->param_count; ++i)
5238 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5243 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static-field access in METHOD requires KLASS' class
 * constructor to run first.  JIT (non-AOT) compiles can skip the check once
 * the vtable is already initialized; beforefieldinit classes and accesses
 * from within the class' own non-static methods also do not need it.
 * NOTE(review): this excerpt elides some original lines (return values);
 * comments cover only the visible code.
 */
5250 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5252 if (!cfg->compile_aot) {
5254 if (vtable->initialized)
5258 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5259 if (cfg->method == method)
5263 if (!mono_class_needs_cctor_run (klass, method))
5266 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5267 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element INDEX of the one-dimensional array ARR with
 * element class KLASS: &arr->vector + index * element_size.  BCHECK requests
 * a bounds check against max_length.  On x86/amd64 with power-of-two element
 * sizes a single LEA is emitted; for gsharedvt variable-size elements the
 * element size is fetched through the rgctx at runtime.  Returns a STACK_MP
 * instruction.
 * NOTE(review): this excerpt elides some original lines; comments cover
 * only the visible code.
 */
5274 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5278 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5281 if (mini_is_gsharedvt_variable_klass (klass)) {
5284 mono_class_init (klass);
5285 size = mono_class_array_element_size (klass);
5288 mult_reg = alloc_preg (cfg);
5289 array_reg = arr->dreg;
5290 index_reg = index->dreg;
5292 #if SIZEOF_REGISTER == 8
5293 /* The array reg is 64 bits but the index reg is only 32 */
5294 if (COMPILE_LLVM (cfg)) {
5296 index2_reg = index_reg;
5298 index2_reg = alloc_preg (cfg);
5299 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5302 if (index->type == STACK_I8) {
5303 index2_reg = alloc_preg (cfg);
5304 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5306 index2_reg = index_reg;
5311 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path: fold scale and vector offset into a single LEA. */
5313 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5314 if (size == 1 || size == 2 || size == 4 || size == 8) {
5315 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5317 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5318 ins->klass = mono_class_get_element_class (klass);
5319 ins->type = STACK_MP;
5325 add_reg = alloc_ireg_mp (cfg);
/* Variable-size (gsharedvt) elements: multiply by a runtime element size. */
5328 MonoInst *rgctx_ins;
5331 g_assert (cfg->gshared);
5332 context_used = mini_class_check_context_used (cfg, klass);
5333 g_assert (context_used);
5334 rgctx_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5335 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5337 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
5339 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5340 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5341 ins->klass = mono_class_get_element_class (klass);
5342 ins->type = STACK_MP;
5343 MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of element [i,j] of a two-dimensional array: range-check
 * both indexes against the per-dimension lower_bound/length stored in
 * arr->bounds, then compute ((i - lo1) * len2 + (j - lo2)) * element_size
 * and add it to &arr->vector.  Only compiled when the target supports
 * non-emulated multiplication (see the surrounding #ifndef).
 * NOTE(review): this excerpt elides some original lines; comments cover
 * only the visible code.
 */
5350 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5352 int bounds_reg = alloc_preg (cfg);
5353 int add_reg = alloc_ireg_mp (cfg);
5354 int mult_reg = alloc_preg (cfg);
5355 int mult2_reg = alloc_preg (cfg);
5356 int low1_reg = alloc_preg (cfg);
5357 int low2_reg = alloc_preg (cfg);
5358 int high1_reg = alloc_preg (cfg);
5359 int high2_reg = alloc_preg (cfg);
5360 int realidx1_reg = alloc_preg (cfg);
5361 int realidx2_reg = alloc_preg (cfg);
5362 int sum_reg = alloc_preg (cfg);
5363 int index1, index2, tmpreg;
5367 mono_class_init (klass);
5368 size = mono_class_array_element_size (klass);
5370 index1 = index_ins1->dreg;
5371 index2 = index_ins2->dreg;
5373 #if SIZEOF_REGISTER == 8
5374 /* The array reg is 64 bits but the index reg is only 32 */
5375 if (COMPILE_LLVM (cfg)) {
5378 tmpreg = alloc_preg (cfg);
5379 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5381 tmpreg = alloc_preg (cfg);
5382 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5386 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5390 /* range checking */
5391 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5392 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, must be < length. */
5394 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5395 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5396 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5397 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5398 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5399 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5400 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: second MonoArrayBounds entry, one struct further in. */
5402 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5403 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5404 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5405 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5406 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5407 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5408 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* offset = ((realidx1 * len2) + realidx2) * size, then add the vector base. */
5410 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5411 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5413 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5414 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5416 ins->type = STACK_MP;
5418 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address of an element of a multi-dimensional array for the
 * synthesized Address/Get/Set array methods: rank 1 uses the inline
 * ldelema_1 path, rank 2 the inline ldelema_2 path (when OP_LMUL is
 * available and the optimization is enabled), and everything else calls the
 * marshalled array-address helper.  IS_SET excludes the trailing value
 * argument when computing the rank.
 */
5425 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5429 MonoMethod *addr_method;
5431 MonoClass *eclass = cmethod->klass->element_class;
5433 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5436 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5438 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5439 /* emit_ldelema_2 depends on OP_LMUL */
5440 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (eclass)) {
5441 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5445 if (mini_is_gsharedvt_variable_klass (eclass))
/* General case: call the runtime-generated address helper for this rank. */
5448 element_size = mono_class_array_element_size (eclass);
5449 addr_method = mono_marshal_get_array_address (rank, element_size);
5450 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy). */
5455 static MonoBreakPolicy
5456 always_insert_breakpoint (MonoMethod *method)
5458 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed policy callback; replaced via mono_set_break_policy (). */
5461 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5464 * mono_set_break_policy:
5465 * policy_callback: the new callback function
5467 * Allow embedders to decide whether to actually obey breakpoint instructions
5468 * (both break IL instructions and Debugger.Break () method calls), for example
5469 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5470 * untrusted or semi-trusted code.
5472 * @policy_callback will be called every time a break point instruction needs to
5473 * be inserted with the method argument being the method that calls Debugger.Break()
5474 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5475 * if it wants the breakpoint to not be effective in the given method.
5476 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; NULL restores the default
 * (always insert breakpoints).  See the documentation comment above. */
5479 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5481 if (policy_callback)
5482 break_policy_func = policy_callback;
5484 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should be
 * effective.  MONO_BREAK_POLICY_ON_DBG is no longer supported and only warns.
 * (The "brekpoint" misspelling is the established name; kept so existing
 * callers keep linking.)
 * NOTE(review): this excerpt elides some original lines (return statements);
 * comments cover only the visible code.
 */
5488 should_insert_brekpoint (MonoMethod *method) {
5489 switch (break_policy_func (method)) {
5490 case MONO_BREAK_POLICY_ALWAYS:
5492 case MONO_BREAK_POLICY_NEVER:
5494 case MONO_BREAK_POLICY_ON_DBG:
5495 g_warning ("mdb no longer supported");
5498 g_warning ("Incorrect value returned from break policy callback");
5503 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inline element copy for the Get/SetGenericValueImpl icalls:
 * compute the element address (no bounds check — callers already did it)
 * and move the value between args [2] and the element.  A GC write barrier
 * is emitted when storing a managed reference into the array.
 */
5505 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5507 MonoInst *addr, *store, *load;
5508 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5510 /* the bounds check is already done by the callers */
5511 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: value -> array element (with write barrier for references). */
5513 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5514 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5515 if (mini_type_is_reference (fsig->params [2]))
5516 emit_write_barrier (cfg, addr, load);
/* get: array element -> destination pointed to by args [2]. */
5518 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5519 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* True when KLASS is a reference type from the JIT's perspective (used to
 * decide whether array stores need covariance checks / write barriers). */
5526 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5528 return mini_type_is_reference (&klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit a store of sp [2] into element sp [1] of array sp [0].  Reference
 * element types with SAFETY_CHECKS go through the virtual stelemref helper
 * (which performs the covariance check); storing a NULL constant skips that.
 * Value types use a direct memory store, with a constant-folded offset when
 * the index is an OP_ICONST, and a write barrier for reference elements.
 * NOTE(review): this excerpt elides some original lines; comments cover
 * only the visible code.
 */
5532 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5534 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5535 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5536 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5537 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5538 MonoInst *iargs [3];
5541 mono_class_setup_vtable (obj_array);
5542 g_assert (helper->slot);
5544 if (sp [0]->type != STACK_OBJ)
5546 if (sp [2]->type != STACK_OBJ)
/* Virtual call: dispatches on the runtime array type (covariance check). */
5553 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5557 if (mini_is_gsharedvt_variable_klass (klass)) {
5560 // FIXME-VT: OP_ICONST optimization
5561 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5562 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5563 ins->opcode = OP_STOREV_MEMBASE;
5564 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset at compile time. */
5565 int array_reg = sp [0]->dreg;
5566 int index_reg = sp [1]->dreg;
5567 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5570 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5571 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5573 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5574 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5575 if (generic_class_is_reference_type (cfg, klass))
5576 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Emit the Array.UnsafeStore/UnsafeLoad intrinsics: a store delegates to
 * emit_array_store without safety checks; a load computes the element
 * address (no bounds check) and loads the value.  The element class comes
 * from params [2] for stores and from the return type for loads.
 */
5583 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5588 eklass = mono_class_from_mono_type (fsig->params [2]);
5590 eklass = mono_class_from_mono_type (fsig->ret);
5593 return emit_array_store (cfg, eklass, args, FALSE);
5595 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5596 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether Array.UnsafeMov may bitwise-reinterpret a value of
 * PARAM_KLASS as RETURN_KLASS.  The checks visible in this extract: both
 * classes must be value types, must contain no GC references, must not mix
 * a struct with a primitive/enum, must not involve R4/R8 (their handling
 * differs in the JIT), and must have the same value size.
 *
 * FIX: the extract contained mojibake — "¶m_klass" is the HTML entity
 * "&para;" produced from "&param_klass" by a bad encoding pass; restored
 * to "&param_klass" (three occurrences).
 *
 * NOTE(review): line-sampled extract — embedded leading numbers are the
 * original line numbers and several interior lines (e.g. the matching
 * normalization of return_klass via mini_get_underlying_type, the return
 * statements) are missing; confirm against the upstream source.
 */
5602 is_unsafe_mov_compatible (MonoCompile *cfg, MonoClass *param_klass, MonoClass *return_klass)
/* Normalize enums etc. to their underlying type before comparing. */
5606 param_klass = mono_class_from_mono_type (mini_get_underlying_type (&param_klass->byval_arg));
5608 //Only allow for valuetypes
5609 if (!param_klass->valuetype || !return_klass->valuetype)
/* Reinterpreting GC references bitwise would break the collector. */
5613 if (param_klass->has_references || return_klass->has_references)
5616 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5617 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5618 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
/* Floating point values live in distinct register banks/stack slots. */
5621 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5622 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5625 //And have the same size
5626 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Emit IR for Array.UnsafeMov: a bitwise reinterpretation of the argument
 * as the return type, allowed only when is_unsafe_mov_compatible () accepts
 * the pair — either the value types themselves, or two rank-1 arrays whose
 * element classes are compatible.  (Line-sampled extract: the statements
 * taken on a successful match are missing from this view.)
 */
5632 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5634 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5635 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5637 //Valuetypes that are semantically equivalent
5638 if (is_unsafe_mov_compatible (cfg, param_klass, return_klass))
5641 //Arrays of valuetypes that are semantically equivalent
5642 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (cfg, param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a call to constructor CMETHOD with inline IR.  When SIMD
 * intrinsics are compiled in and enabled (MONO_OPT_SIMD), the SIMD
 * intrinsics emitter gets first chance; otherwise (or on failure) fall back
 * to the native-types intrinsics emitter.  (Line-sampled extract.)
 */
5649 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5651 #ifdef MONO_ARCH_SIMD_INTRINSICS
5652 MonoInst *ins = NULL;
5654 if (cfg->opt & MONO_OPT_SIMD) {
5655 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5661 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND (one of the
 * MONO_MEMORY_BARRIER_* constants, stored in backend.memory_barrier_kind)
 * to the current basic block.  (Line-sampled extract: return statement not
 * visible here.)
 */
5665 emit_memory_barrier (MonoCompile *cfg, int kind)
5667 MonoInst *ins = NULL;
5668 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5669 MONO_ADD_INS (cfg->cbb, ins);
5670 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics emitted only when compiling with the LLVM backend, which can
 * lower these opcodes directly: System.Math Sin/Cos/Sqrt/Abs(double) as a
 * unary FP opcode, and (under MONO_OPT_CMOV) branchless integer Min/Max.
 * (Line-sampled extract: the opcode assignments for Sin/Cos/Sqrt/Abs and
 * the signed I4/I8 Min/Max cases are among the missing lines.)
 */
5676 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5678 MonoInst *ins = NULL;
5681 /* The LLVM backend supports these intrinsics */
5682 if (cmethod->klass == mono_defaults.math_class) {
5683 if (strcmp (cmethod->name, "Sin") == 0) {
5685 } else if (strcmp (cmethod->name, "Cos") == 0) {
5687 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5689 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: result in a fresh FP register. */
5693 if (opcode && fsig->param_count == 1) {
5694 MONO_INST_NEW (cfg, ins, opcode);
5695 ins->type = STACK_R8;
5696 ins->dreg = mono_alloc_freg (cfg);
5697 ins->sreg1 = args [0]->dreg;
5698 MONO_ADD_INS (cfg->cbb, ins);
/* Branchless integer Min/Max requires conditional-move support. */
5702 if (cfg->opt & MONO_OPT_CMOV) {
5703 if (strcmp (cmethod->name, "Min") == 0) {
5704 if (fsig->params [0]->type == MONO_TYPE_I4)
5706 if (fsig->params [0]->type == MONO_TYPE_U4)
5707 opcode = OP_IMIN_UN;
5708 else if (fsig->params [0]->type == MONO_TYPE_I8)
5710 else if (fsig->params [0]->type == MONO_TYPE_U8)
5711 opcode = OP_LMIN_UN;
5712 } else if (strcmp (cmethod->name, "Max") == 0) {
5713 if (fsig->params [0]->type == MONO_TYPE_I4)
5715 if (fsig->params [0]->type == MONO_TYPE_U4)
5716 opcode = OP_IMAX_UN;
5717 else if (fsig->params [0]->type == MONO_TYPE_I8)
5719 else if (fsig->params [0]->type == MONO_TYPE_U8)
5720 opcode = OP_LMAX_UN;
/* Binary Min/Max: stack type follows the operand width. */
5724 if (opcode && fsig->param_count == 2) {
5725 MONO_INST_NEW (cfg, ins, opcode);
5726 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5727 ins->dreg = mono_alloc_ireg (cfg);
5728 ins->sreg1 = args [0]->dreg;
5729 ins->sreg2 = args [1]->dreg;
5730 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to emit in shared (generic-sharing) code:
 * the System.Array Unsafe{Store,Load,Mov} internal helpers.  Returns the
 * emitted instruction, or (presumably, line sampled out) NULL when the
 * method is not recognized.
 */
5738 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5740 if (cmethod->klass == mono_defaults.array_class) {
5741 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5742 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5743 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5744 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5745 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5746 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   The main intrinsics dispatcher: recognizes well-known corlib (and a few
 * ObjC-binding) methods by class and name and replaces the call with inline
 * IR.  Falls through to SIMD, native-types, LLVM and arch-specific emitters
 * at the end.  (Line-sampled extract: embedded leading numbers are original
 * line numbers; #else/#endif lines, closing braces and some returns are
 * missing from this view — do not infer control flow solely from layout.)
 */
5753 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5755 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
5757 static MonoClass *runtime_helpers_class = NULL;
5758 if (! runtime_helpers_class)
5759 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5760 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
5762 if (cmethod->klass == mono_defaults.string_class) {
/* String.get_Chars: bounds check + 16-bit load from the chars array. */
5763 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count + fsig->hasthis == 2) {
5764 int dreg = alloc_ireg (cfg);
5765 int index_reg = alloc_preg (cfg);
5766 int add_reg = alloc_preg (cfg);
5768 #if SIZEOF_REGISTER == 8
5769 /* The array reg is 64 bits but the index reg is only 32 */
5770 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5772 index_reg = args [1]->dreg;
5774 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5776 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: fold base + index*2 + offset into a single LEA. */
5777 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5778 add_reg = ins->dreg;
5779 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: shift index by 1 (2-byte chars) and add to the base. */
5782 int mult_reg = alloc_preg (cfg);
5783 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5784 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5785 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5786 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5788 type_from_op (cfg, ins, NULL, NULL);
/* String.get_Length: OP_STRLEN, decomposed later; faults on null. */
5790 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5791 int dreg = alloc_ireg (cfg);
5792 /* Decompose later to allow more optimizations */
5793 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5794 ins->type = STACK_I4;
5795 ins->flags |= MONO_INST_FAULT;
5796 cfg->cbb->has_array_access = TRUE;
5797 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* --- System.Object intrinsics --- */
5802 } else if (cmethod->klass == mono_defaults.object_class) {
/* Object.GetType: vtable load (faulting on null) + vtable->type load. */
5804 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count + fsig->hasthis == 1) {
5805 int dreg = alloc_ireg_ref (cfg);
5806 int vt_reg = alloc_preg (cfg);
5807 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5808 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5809 type_from_op (cfg, ins, NULL, NULL);
5812 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* InternalGetHashCode: (obj >> 3) * 2654435761 (Knuth multiplicative
 * hash); only valid with a non-moving GC, since it hashes the address. */
5813 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5814 int dreg = alloc_ireg (cfg);
5815 int t1 = alloc_ireg (cfg);
5817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5818 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5819 ins->type = STACK_I4;
/* Object..ctor is empty: a NOP suffices. */
5823 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5824 MONO_INST_NEW (cfg, ins, OP_NOP);
5825 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
5829 } else if (cmethod->klass == mono_defaults.array_class) {
5830 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5831 return emit_array_generic_access (cfg, fsig, args, FALSE);
5832 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count + fsig->hasthis == 3 && !cfg->gsharedvt)
5833 return emit_array_generic_access (cfg, fsig, args, TRUE);
5835 #ifndef MONO_BIG_ARRAYS
5837 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
/* GetLength(0)/GetLowerBound(0) with a constant 0 argument: branch on
 * whether the array has a bounds descriptor (multi-dim) or is a szarray. */
5840 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count + fsig->hasthis == 2) ||
5841 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count + fsig->hasthis == 2)) &&
5842 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5843 int dreg = alloc_ireg (cfg);
5844 int bounds_reg = alloc_ireg_mp (cfg);
5845 MonoBasicBlock *end_bb, *szarray_bb;
5846 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5848 NEW_BBLOCK (cfg, end_bb);
5849 NEW_BBLOCK (cfg, szarray_bb);
5851 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5852 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5854 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5855 /* Non-szarray case */
5857 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5858 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5860 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5861 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5862 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5863 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length; GetLowerBound(0) is always 0. */
5866 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5867 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5869 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5870 MONO_START_BB (cfg, end_bb);
5872 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5873 ins->type = STACK_I4;
/* Quick reject: remaining Array intrinsics all start with 'g'. */
5879 if (cmethod->name [0] != 'g')
/* Array.get_Rank: load rank byte from the vtable. */
5882 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count + fsig->hasthis == 1) {
5883 int dreg = alloc_ireg (cfg);
5884 int vtable_reg = alloc_preg (cfg);
5885 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5886 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5887 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5888 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5889 type_from_op (cfg, ins, NULL, NULL);
/* Array.get_Length: direct max_length load (faults on null). */
5892 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count + fsig->hasthis == 1) {
5893 int dreg = alloc_ireg (cfg);
5895 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5896 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5897 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers --- */
5902 } else if (cmethod->klass == runtime_helpers_class) {
5904 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5905 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
5909 } else if (cmethod->klass == mono_defaults.thread_class) {
5910 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5911 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5912 MONO_ADD_INS (cfg->cbb, ins);
5914 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5915 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Thread.VolatileRead: plain load + acquire barrier afterwards. */
5916 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5918 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5920 if (fsig->params [0]->type == MONO_TYPE_I1)
5921 opcode = OP_LOADI1_MEMBASE;
5922 else if (fsig->params [0]->type == MONO_TYPE_U1)
5923 opcode = OP_LOADU1_MEMBASE;
5924 else if (fsig->params [0]->type == MONO_TYPE_I2)
5925 opcode = OP_LOADI2_MEMBASE;
5926 else if (fsig->params [0]->type == MONO_TYPE_U2)
5927 opcode = OP_LOADU2_MEMBASE;
5928 else if (fsig->params [0]->type == MONO_TYPE_I4)
5929 opcode = OP_LOADI4_MEMBASE;
5930 else if (fsig->params [0]->type == MONO_TYPE_U4)
5931 opcode = OP_LOADU4_MEMBASE;
5932 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5933 opcode = OP_LOADI8_MEMBASE;
5934 else if (fsig->params [0]->type == MONO_TYPE_R4)
5935 opcode = OP_LOADR4_MEMBASE;
5936 else if (fsig->params [0]->type == MONO_TYPE_R8)
5937 opcode = OP_LOADR8_MEMBASE;
5938 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5939 opcode = OP_LOAD_MEMBASE;
5942 MONO_INST_NEW (cfg, ins, opcode);
5943 ins->inst_basereg = args [0]->dreg;
5944 ins->inst_offset = 0;
5945 MONO_ADD_INS (cfg->cbb, ins);
/* Pick dreg bank and stack type per parameter type (cases sampled out). */
5947 switch (fsig->params [0]->type) {
5954 ins->dreg = mono_alloc_ireg (cfg);
5955 ins->type = STACK_I4;
5959 ins->dreg = mono_alloc_lreg (cfg);
5960 ins->type = STACK_I8;
5964 ins->dreg = mono_alloc_ireg (cfg);
5965 #if SIZEOF_REGISTER == 8
5966 ins->type = STACK_I8;
5968 ins->type = STACK_I4;
5973 ins->dreg = mono_alloc_freg (cfg);
5974 ins->type = STACK_R8;
5977 g_assert (mini_type_is_reference (fsig->params [0]));
5978 ins->dreg = mono_alloc_ireg_ref (cfg);
5979 ins->type = STACK_OBJ;
/* 64-bit loads need decomposition on 32-bit targets. */
5983 if (opcode == OP_LOADI8_MEMBASE)
5984 ins = mono_decompose_opcode (cfg, ins);
5986 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* Thread.VolatileWrite: release barrier before a plain store. */
5990 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5992 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
5994 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5995 opcode = OP_STOREI1_MEMBASE_REG;
5996 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5997 opcode = OP_STOREI2_MEMBASE_REG;
5998 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5999 opcode = OP_STOREI4_MEMBASE_REG;
6000 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
6001 opcode = OP_STOREI8_MEMBASE_REG;
6002 else if (fsig->params [0]->type == MONO_TYPE_R4)
6003 opcode = OP_STORER4_MEMBASE_REG;
6004 else if (fsig->params [0]->type == MONO_TYPE_R8)
6005 opcode = OP_STORER8_MEMBASE_REG;
6006 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
6007 opcode = OP_STORE_MEMBASE_REG;
6010 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
6012 MONO_INST_NEW (cfg, ins, opcode);
6013 ins->sreg1 = args [1]->dreg;
6014 ins->inst_destbasereg = args [0]->dreg;
6015 ins->inst_offset = 0;
6016 MONO_ADD_INS (cfg->cbb, ins);
6018 if (opcode == OP_STOREI8_MEMBASE_REG)
6019 ins = mono_decompose_opcode (cfg, ins);
/* --- System.Threading.Monitor: fast-path Enter/Exit trampolines --- */
6024 } else if (cmethod->klass == mono_defaults.monitor_class) {
6025 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
6026 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
6029 if (COMPILE_LLVM (cfg)) {
6031 * Pass the argument normally, the LLVM backend will handle the
6032 * calling convention problems.
6034 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: the trampoline takes the object in a fixed register. */
6036 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
6037 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6038 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6039 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6042 return (MonoInst*)call;
6043 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Monitor.Enter(obj, ref lockTaken) (v4 overload). */
6044 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
6047 if (COMPILE_LLVM (cfg)) {
6049 * Pass the argument normally, the LLVM backend will handle the
6050 * calling convention problems.
6052 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
6054 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
6055 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6056 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6057 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
6060 return (MonoInst*)call;
6062 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
6065 if (COMPILE_LLVM (cfg)) {
6066 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
6068 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
6069 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6070 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6071 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6074 return (MonoInst*)call;
/* --- System.Threading.Interlocked --- */
6077 } else if (cmethod->klass->image == mono_defaults.corlib &&
6078 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6079 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6082 #if SIZEOF_REGISTER == 8
/* Interlocked.Read(ref long): atomic load opcode when supported,
 * otherwise barrier + plain 64-bit load + barrier. */
6083 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6084 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6085 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6086 ins->dreg = mono_alloc_preg (cfg);
6087 ins->sreg1 = args [0]->dreg;
6088 ins->type = STACK_I8;
6089 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6090 MONO_ADD_INS (cfg->cbb, ins);
6094 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6096 /* 64 bit reads are already atomic */
6097 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6098 load_ins->dreg = mono_alloc_preg (cfg);
6099 load_ins->inst_basereg = args [0]->dreg;
6100 load_ins->inst_offset = 0;
6101 load_ins->type = STACK_I8;
6102 MONO_ADD_INS (cfg->cbb, load_ins);
6104 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment: atomic add of constant +1. */
6111 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6112 MonoInst *ins_iconst;
6115 if (fsig->params [0]->type == MONO_TYPE_I4) {
6116 opcode = OP_ATOMIC_ADD_I4;
6117 cfg->has_atomic_add_i4 = TRUE;
6119 #if SIZEOF_REGISTER == 8
6120 else if (fsig->params [0]->type == MONO_TYPE_I8)
6121 opcode = OP_ATOMIC_ADD_I8;
6124 if (!mono_arch_opcode_supported (opcode))
6126 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6127 ins_iconst->inst_c0 = 1;
6128 ins_iconst->dreg = mono_alloc_ireg (cfg);
6129 MONO_ADD_INS (cfg->cbb, ins_iconst);
6131 MONO_INST_NEW (cfg, ins, opcode);
6132 ins->dreg = mono_alloc_ireg (cfg);
6133 ins->inst_basereg = args [0]->dreg;
6134 ins->inst_offset = 0;
6135 ins->sreg2 = ins_iconst->dreg;
6136 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6137 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of constant -1. */
6139 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6140 MonoInst *ins_iconst;
6143 if (fsig->params [0]->type == MONO_TYPE_I4) {
6144 opcode = OP_ATOMIC_ADD_I4;
6145 cfg->has_atomic_add_i4 = TRUE;
6147 #if SIZEOF_REGISTER == 8
6148 else if (fsig->params [0]->type == MONO_TYPE_I8)
6149 opcode = OP_ATOMIC_ADD_I8;
6152 if (!mono_arch_opcode_supported (opcode))
6154 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6155 ins_iconst->inst_c0 = -1;
6156 ins_iconst->dreg = mono_alloc_ireg (cfg);
6157 MONO_ADD_INS (cfg->cbb, ins_iconst);
6159 MONO_INST_NEW (cfg, ins, opcode);
6160 ins->dreg = mono_alloc_ireg (cfg);
6161 ins->inst_basereg = args [0]->dreg;
6162 ins->inst_offset = 0;
6163 ins->sreg2 = ins_iconst->dreg;
6164 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6165 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the second argument. */
6167 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6170 if (fsig->params [0]->type == MONO_TYPE_I4) {
6171 opcode = OP_ATOMIC_ADD_I4;
6172 cfg->has_atomic_add_i4 = TRUE;
6174 #if SIZEOF_REGISTER == 8
6175 else if (fsig->params [0]->type == MONO_TYPE_I8)
6176 opcode = OP_ATOMIC_ADD_I8;
6179 if (!mono_arch_opcode_supported (opcode))
6181 MONO_INST_NEW (cfg, ins, opcode);
6182 ins->dreg = mono_alloc_ireg (cfg);
6183 ins->inst_basereg = args [0]->dreg;
6184 ins->inst_offset = 0;
6185 ins->sreg2 = args [1]->dreg;
6186 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6187 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: atomic exchange; floats are moved through integer registers
 * via MOVE_F_TO_I*/I*_TO_F around the atomic opcode. */
6190 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6191 MonoInst *f2i = NULL, *i2f;
6192 guint32 opcode, f2i_opcode, i2f_opcode;
6193 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6194 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6196 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6197 fsig->params [0]->type == MONO_TYPE_R4) {
6198 opcode = OP_ATOMIC_EXCHANGE_I4;
6199 f2i_opcode = OP_MOVE_F_TO_I4;
6200 i2f_opcode = OP_MOVE_I4_TO_F;
6201 cfg->has_atomic_exchange_i4 = TRUE;
6203 #if SIZEOF_REGISTER == 8
6205 fsig->params [0]->type == MONO_TYPE_I8 ||
6206 fsig->params [0]->type == MONO_TYPE_R8 ||
6207 fsig->params [0]->type == MONO_TYPE_I) {
6208 opcode = OP_ATOMIC_EXCHANGE_I8;
6209 f2i_opcode = OP_MOVE_F_TO_I8;
6210 i2f_opcode = OP_MOVE_I8_TO_F;
6213 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6214 opcode = OP_ATOMIC_EXCHANGE_I4;
6215 cfg->has_atomic_exchange_i4 = TRUE;
6221 if (!mono_arch_opcode_supported (opcode))
6225 /* TODO: Decompose these opcodes instead of bailing here. */
6226 if (COMPILE_SOFT_FLOAT (cfg))
6229 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6230 f2i->dreg = mono_alloc_ireg (cfg);
6231 f2i->sreg1 = args [1]->dreg;
6232 if (f2i_opcode == OP_MOVE_F_TO_I4)
6233 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6234 MONO_ADD_INS (cfg->cbb, f2i);
6237 MONO_INST_NEW (cfg, ins, opcode);
6238 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6239 ins->inst_basereg = args [0]->dreg;
6240 ins->inst_offset = 0;
6241 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6242 MONO_ADD_INS (cfg->cbb, ins);
6244 switch (fsig->params [0]->type) {
6246 ins->type = STACK_I4;
6249 ins->type = STACK_I8;
6252 #if SIZEOF_REGISTER == 8
6253 ins->type = STACK_I8;
6255 ins->type = STACK_I4;
6260 ins->type = STACK_R8;
6263 g_assert (mini_type_is_reference (fsig->params [0]));
6264 ins->type = STACK_OBJ;
6269 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6270 i2f->dreg = mono_alloc_freg (cfg);
6271 i2f->sreg1 = ins->dreg;
6272 i2f->type = STACK_R8;
6273 if (i2f_opcode == OP_MOVE_I4_TO_F)
6274 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6275 MONO_ADD_INS (cfg->cbb, i2f);
/* The exchanged-in reference must be recorded by the write barrier. */
6280 if (cfg->gen_write_barriers && is_ref)
6281 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange(ref T, T, T): atomic CAS; same float shuffling as
 * Exchange but for both the new value and the comparand. */
6283 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6284 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6285 guint32 opcode, f2i_opcode, i2f_opcode;
6286 gboolean is_ref = mini_type_is_reference (fsig->params [1]);
6287 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6289 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6290 fsig->params [1]->type == MONO_TYPE_R4) {
6291 opcode = OP_ATOMIC_CAS_I4;
6292 f2i_opcode = OP_MOVE_F_TO_I4;
6293 i2f_opcode = OP_MOVE_I4_TO_F;
6294 cfg->has_atomic_cas_i4 = TRUE;
6296 #if SIZEOF_REGISTER == 8
6298 fsig->params [1]->type == MONO_TYPE_I8 ||
6299 fsig->params [1]->type == MONO_TYPE_R8 ||
6300 fsig->params [1]->type == MONO_TYPE_I) {
6301 opcode = OP_ATOMIC_CAS_I8;
6302 f2i_opcode = OP_MOVE_F_TO_I8;
6303 i2f_opcode = OP_MOVE_I8_TO_F;
6306 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6307 opcode = OP_ATOMIC_CAS_I4;
6308 cfg->has_atomic_cas_i4 = TRUE;
6314 if (!mono_arch_opcode_supported (opcode))
6318 /* TODO: Decompose these opcodes instead of bailing here. */
6319 if (COMPILE_SOFT_FLOAT (cfg))
6322 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6323 f2i_new->dreg = mono_alloc_ireg (cfg);
6324 f2i_new->sreg1 = args [1]->dreg;
6325 if (f2i_opcode == OP_MOVE_F_TO_I4)
6326 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6327 MONO_ADD_INS (cfg->cbb, f2i_new);
6329 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6330 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6331 f2i_cmp->sreg1 = args [2]->dreg;
6332 if (f2i_opcode == OP_MOVE_F_TO_I4)
6333 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6334 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6337 MONO_INST_NEW (cfg, ins, opcode);
6338 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6339 ins->sreg1 = args [0]->dreg;
6340 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6341 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6342 MONO_ADD_INS (cfg->cbb, ins);
6344 switch (fsig->params [1]->type) {
6346 ins->type = STACK_I4;
6349 ins->type = STACK_I8;
6352 #if SIZEOF_REGISTER == 8
6353 ins->type = STACK_I8;
6355 ins->type = STACK_I4;
6359 ins->type = cfg->r4_stack_type;
6362 ins->type = STACK_R8;
6365 g_assert (mini_type_is_reference (fsig->params [1]));
6366 ins->type = STACK_OBJ;
6371 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6372 i2f->dreg = mono_alloc_freg (cfg);
6373 i2f->sreg1 = ins->dreg;
6374 i2f->type = STACK_R8;
6375 if (i2f_opcode == OP_MOVE_I4_TO_F)
6376 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6377 MONO_ADD_INS (cfg->cbb, i2f);
6382 if (cfg->gen_write_barriers && is_ref)
6383 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange(ref int, int, int, out bool): CAS + compare result
 * with the comparand and store the boolean through args [3]. */
6385 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6386 fsig->params [1]->type == MONO_TYPE_I4) {
6387 MonoInst *cmp, *ceq;
6389 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6392 /* int32 r = CAS (location, value, comparand); */
6393 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6394 ins->dreg = alloc_ireg (cfg);
6395 ins->sreg1 = args [0]->dreg;
6396 ins->sreg2 = args [1]->dreg;
6397 ins->sreg3 = args [2]->dreg;
6398 ins->type = STACK_I4;
6399 MONO_ADD_INS (cfg->cbb, ins);
6401 /* bool result = r == comparand; */
6402 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6403 cmp->sreg1 = ins->dreg;
6404 cmp->sreg2 = args [2]->dreg;
6405 cmp->type = STACK_I4;
6406 MONO_ADD_INS (cfg->cbb, cmp);
6408 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6409 ceq->dreg = alloc_ireg (cfg);
6410 ceq->type = STACK_I4;
6411 MONO_ADD_INS (cfg->cbb, ceq);
6413 /* *success = result; */
6414 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6416 cfg->has_atomic_cas_i4 = TRUE;
6418 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6419 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile: acquire loads / release stores --- */
6423 } else if (cmethod->klass->image == mono_defaults.corlib &&
6424 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6425 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6428 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6430 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6431 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6433 if (fsig->params [0]->type == MONO_TYPE_I1)
6434 opcode = OP_ATOMIC_LOAD_I1;
6435 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6436 opcode = OP_ATOMIC_LOAD_U1;
6437 else if (fsig->params [0]->type == MONO_TYPE_I2)
6438 opcode = OP_ATOMIC_LOAD_I2;
6439 else if (fsig->params [0]->type == MONO_TYPE_U2)
6440 opcode = OP_ATOMIC_LOAD_U2;
6441 else if (fsig->params [0]->type == MONO_TYPE_I4)
6442 opcode = OP_ATOMIC_LOAD_I4;
6443 else if (fsig->params [0]->type == MONO_TYPE_U4)
6444 opcode = OP_ATOMIC_LOAD_U4;
6445 else if (fsig->params [0]->type == MONO_TYPE_R4)
6446 opcode = OP_ATOMIC_LOAD_R4;
6447 else if (fsig->params [0]->type == MONO_TYPE_R8)
6448 opcode = OP_ATOMIC_LOAD_R8;
6449 #if SIZEOF_REGISTER == 8
6450 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6451 opcode = OP_ATOMIC_LOAD_I8;
6452 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6453 opcode = OP_ATOMIC_LOAD_U8;
6455 else if (fsig->params [0]->type == MONO_TYPE_I)
6456 opcode = OP_ATOMIC_LOAD_I4;
6457 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6458 opcode = OP_ATOMIC_LOAD_U4;
6462 if (!mono_arch_opcode_supported (opcode))
6465 MONO_INST_NEW (cfg, ins, opcode);
6466 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6467 ins->sreg1 = args [0]->dreg;
6468 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6469 MONO_ADD_INS (cfg->cbb, ins);
6471 switch (fsig->params [0]->type) {
6472 case MONO_TYPE_BOOLEAN:
6479 ins->type = STACK_I4;
6483 ins->type = STACK_I8;
6487 #if SIZEOF_REGISTER == 8
6488 ins->type = STACK_I8;
6490 ins->type = STACK_I4;
6494 ins->type = cfg->r4_stack_type;
6497 ins->type = STACK_R8;
6500 g_assert (mini_type_is_reference (fsig->params [0]));
6501 ins->type = STACK_OBJ;
/* Volatile.Write: release-ordered atomic store. */
6507 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6509 gboolean is_ref = mini_type_is_reference (fsig->params [0]);
6511 if (fsig->params [0]->type == MONO_TYPE_I1)
6512 opcode = OP_ATOMIC_STORE_I1;
6513 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6514 opcode = OP_ATOMIC_STORE_U1;
6515 else if (fsig->params [0]->type == MONO_TYPE_I2)
6516 opcode = OP_ATOMIC_STORE_I2;
6517 else if (fsig->params [0]->type == MONO_TYPE_U2)
6518 opcode = OP_ATOMIC_STORE_U2;
6519 else if (fsig->params [0]->type == MONO_TYPE_I4)
6520 opcode = OP_ATOMIC_STORE_I4;
6521 else if (fsig->params [0]->type == MONO_TYPE_U4)
6522 opcode = OP_ATOMIC_STORE_U4;
6523 else if (fsig->params [0]->type == MONO_TYPE_R4)
6524 opcode = OP_ATOMIC_STORE_R4;
6525 else if (fsig->params [0]->type == MONO_TYPE_R8)
6526 opcode = OP_ATOMIC_STORE_R8;
6527 #if SIZEOF_REGISTER == 8
6528 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6529 opcode = OP_ATOMIC_STORE_I8;
6530 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6531 opcode = OP_ATOMIC_STORE_U8;
6533 else if (fsig->params [0]->type == MONO_TYPE_I)
6534 opcode = OP_ATOMIC_STORE_I4;
6535 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6536 opcode = OP_ATOMIC_STORE_U4;
6540 if (!mono_arch_opcode_supported (opcode))
6543 MONO_INST_NEW (cfg, ins, opcode);
6544 ins->dreg = args [0]->dreg;
6545 ins->sreg1 = args [1]->dreg;
6546 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6547 MONO_ADD_INS (cfg->cbb, ins);
6549 if (cfg->gen_write_barriers && is_ref)
6550 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger.Break --- */
6556 } else if (cmethod->klass->image == mono_defaults.corlib &&
6557 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6558 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6559 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
/* NOTE: "brekpoint" is the upstream spelling of this helper's name. */
6560 if (should_insert_brekpoint (cfg->method)) {
6561 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6563 MONO_INST_NEW (cfg, ins, OP_NOP);
6564 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment.get_IsRunningOnWindows: compile-time constant --- */
6568 } else if (cmethod->klass->image == mono_defaults.corlib &&
6569 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6570 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6571 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6573 EMIT_NEW_ICONST (cfg, ins, 1);
6575 EMIT_NEW_ICONST (cfg, ins, 0);
6578 } else if (cmethod->klass == mono_defaults.math_class) {
6580 * There is general branchless code for Min/Max, but it does not work for
6582 * http://everything2.com/?node_id=1051618
/* --- MonoMac/monotouch/Xamarin.iOS Selector.GetHandle --- */
6584 } else if (((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6585 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6586 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6587 !strcmp (cmethod->klass->name, "Selector")) ||
6588 (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
6589 !strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
6590 !strcmp (cmethod->klass->name, "Selector"))
6592 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
/* Selector.GetHandle("literal"): fold the ldstr argument into an
 * OP_OBJC_GET_SELECTOR carrying the UTF-8 selector name. */
6593 if (!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
6594 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6597 MonoJumpInfoToken *ji;
6600 cfg->disable_llvm = TRUE;
6602 if (args [0]->opcode == OP_GOT_ENTRY) {
6603 pi = args [0]->inst_p1;
6604 g_assert (pi->opcode == OP_PATCH_INFO);
6605 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6608 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6609 ji = args [0]->inst_p0;
/* The original ldstr becomes dead once folded into the selector op. */
6612 NULLIFY_INS (args [0]);
6615 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6616 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6617 ins->dreg = mono_alloc_ireg (cfg);
6619 ins->inst_p0 = mono_string_to_utf8 (s);
6620 MONO_ADD_INS (cfg->cbb, ins);
/* --- Fallbacks: SIMD, native types, LLVM-specific, then arch-specific --- */
6626 #ifdef MONO_ARCH_SIMD_INTRINSICS
6627 if (cfg->opt & MONO_OPT_SIMD) {
6628 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6634 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6638 if (COMPILE_LLVM (cfg)) {
6639 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6644 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6648 * This entry point could be used later for arbitrary method
6651 inline static MonoInst*
6652 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6653 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6655 if (method->klass == mono_defaults.string_class) {
6656 /* managed string allocation support */
6657 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6658 MonoInst *iargs [2];
6659 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6660 MonoMethod *managed_alloc = NULL;
6662 g_assert (vtable); /*Should not fail since it System.String*/
6663 #ifndef MONO_CROSS_COMPILE
6664 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
6668 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6669 iargs [1] = args [0];
6670 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
6677 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6679 MonoInst *store, *temp;
6682 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6683 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6686 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6687 * would be different than the MonoInst's used to represent arguments, and
6688 * the ldelema implementation can't deal with that.
6689 * Solution: When ldelema is used on an inline argument, create a var for
6690 * it, emit ldelema on that var, and emit the saving code below in
6691 * inline_method () if needed.
6693 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6694 cfg->args [i] = temp;
6695 /* This uses cfg->args [i] which is set by the preceeding line */
6696 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6697 store->cil_code = sp [0]->cil_code;
6702 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6703 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6705 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6707 check_inline_called_method_name_limit (MonoMethod *called_method)
6710 static const char *limit = NULL;
6712 if (limit == NULL) {
6713 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6715 if (limit_string != NULL)
6716 limit = limit_string;
6721 if (limit [0] != '\0') {
6722 char *called_method_name = mono_method_full_name (called_method, TRUE);
6724 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6725 g_free (called_method_name);
6727 //return (strncmp_result <= 0);
6728 return (strncmp_result == 0);
#if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debugging aid: only inline into callers whose full name starts with
 * the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable. An empty/unset variable allows everything.
 */
static gboolean
check_inline_caller_method_name_limit (MonoMethod *caller_method)
{
	int strncmp_result;
	static const char *limit = NULL;

	if (limit == NULL) {
		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
		if (limit_string != NULL) {
			limit = limit_string;
		} else {
			limit = "";
		}
	}

	if (limit [0] != '\0') {
		char *caller_method_name = mono_method_full_name (caller_method, TRUE);

		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
		g_free (caller_method_name);

		//return (strncmp_result <= 0);
		return (strncmp_result == 0);
	} else {
		return TRUE;
	}
}
#endif
6766 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6768 static double r8_0 = 0.0;
6769 static float r4_0 = 0.0;
6773 rtype = mini_get_underlying_type (rtype);
6777 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6778 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6779 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6780 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6781 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6782 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6783 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6784 ins->type = STACK_R4;
6785 ins->inst_p0 = (void*)&r4_0;
6787 MONO_ADD_INS (cfg->cbb, ins);
6788 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6789 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6790 ins->type = STACK_R8;
6791 ins->inst_p0 = (void*)&r8_0;
6793 MONO_ADD_INS (cfg->cbb, ins);
6794 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6795 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6796 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6797 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6798 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6800 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6805 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6809 rtype = mini_get_underlying_type (rtype);
6813 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6814 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6815 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6816 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6817 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6818 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6819 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6820 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6821 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6822 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6823 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6824 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6825 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (rtype)) {
6826 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6828 emit_init_rvar (cfg, dreg, rtype);
6832 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6834 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6836 MonoInst *var = cfg->locals [local];
6837 if (COMPILE_SOFT_FLOAT (cfg)) {
6839 int reg = alloc_dreg (cfg, var->type);
6840 emit_init_rvar (cfg, reg, type);
6841 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6844 emit_init_rvar (cfg, var->dreg, type);
6846 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current compile position and return its cost.
 * NOTE(review): this extract is missing interleaved lines (the left-hand
 * original line numbers are non-contiguous), so the code is kept
 * byte-for-byte; only annotating comments were added.
 */
6853 * Return the cost of inlining CMETHOD.
6856 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6857 guchar *ip, guint real_offset, gboolean inline_always)
6859 MonoInst *ins, *rvar = NULL;
6860 MonoMethodHeader *cheader;
6861 MonoBasicBlock *ebblock, *sbblock;
6863 MonoMethod *prev_inlined_method;
6864 MonoInst **prev_locals, **prev_args;
6865 MonoType **prev_arg_types;
6866 guint prev_real_offset;
6867 GHashTable *prev_cbb_hash;
6868 MonoBasicBlock **prev_cil_offset_to_bb;
6869 MonoBasicBlock *prev_cbb;
6870 unsigned char* prev_cil_start;
6871 guint32 prev_cil_offset_to_bb_len;
6872 MonoMethod *prev_current_method;
6873 MonoGenericContext *prev_generic_context;
6874 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6876 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based name filters for debugging inlining decisions */
6878 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6879 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6882 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6883 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6888 fsig = mono_method_signature (cmethod);
6890 if (cfg->verbose_level > 2)
6891 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6893 if (!cmethod->inline_info) {
6894 cfg->stat_inlineable_methods++;
6895 cmethod->inline_info = 1;
6898 /* allocate local variables */
6899 cheader = mono_method_get_header (cmethod);
6901 if (cheader == NULL || mono_loader_get_last_error ()) {
6902 MonoLoaderError *error = mono_loader_get_last_error ();
6905 mono_metadata_free_mh (cheader);
6906 if (inline_always && error)
6907 mono_cfg_set_exception (cfg, error->exception_type);
6909 mono_loader_clear_error ();
6913 /*Must verify before creating locals as it can cause the JIT to assert.*/
6914 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6915 mono_metadata_free_mh (cheader);
6919 /* allocate space to store the return value */
6920 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6921 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6924 prev_locals = cfg->locals;
6925 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6926 for (i = 0; i < cheader->num_locals; ++i)
6927 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6929 /* allocate start and end blocks */
6930 /* This is needed so if the inline is aborted, we can clean up */
6931 NEW_BBLOCK (cfg, sbblock);
6932 sbblock->real_offset = real_offset;
6934 NEW_BBLOCK (cfg, ebblock);
6935 ebblock->block_num = cfg->num_bblocks++;
6936 ebblock->real_offset = real_offset;
/* Save the caller's per-method JIT state; restored below after the
 * recursive mono_method_to_ir () call. */
6938 prev_args = cfg->args;
6939 prev_arg_types = cfg->arg_types;
6940 prev_inlined_method = cfg->inlined_method;
6941 cfg->inlined_method = cmethod;
6942 cfg->ret_var_set = FALSE;
6943 cfg->inline_depth ++;
6944 prev_real_offset = cfg->real_offset;
6945 prev_cbb_hash = cfg->cbb_hash;
6946 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6947 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6948 prev_cil_start = cfg->cil_start;
6949 prev_cbb = cfg->cbb;
6950 prev_current_method = cfg->current_method;
6951 prev_generic_context = cfg->generic_context;
6952 prev_ret_var_set = cfg->ret_var_set;
6953 prev_disable_inline = cfg->disable_inline;
6955 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL into IR between sbblock/ebblock;
 * the returned cost is checked against a threshold below. */
6958 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6960 ret_var_set = cfg->ret_var_set;
/* Restore the caller's JIT state */
6962 cfg->inlined_method = prev_inlined_method;
6963 cfg->real_offset = prev_real_offset;
6964 cfg->cbb_hash = prev_cbb_hash;
6965 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6966 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6967 cfg->cil_start = prev_cil_start;
6968 cfg->locals = prev_locals;
6969 cfg->args = prev_args;
6970 cfg->arg_types = prev_arg_types;
6971 cfg->current_method = prev_current_method;
6972 cfg->generic_context = prev_generic_context;
6973 cfg->ret_var_set = prev_ret_var_set;
6974 cfg->disable_inline = prev_disable_inline;
6975 cfg->inline_depth --;
/* Accept the inline when it was cheap enough, or when it is forced */
6977 if ((costs >= 0 && costs < 60) || inline_always) {
6978 if (cfg->verbose_level > 2)
6979 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6981 cfg->stat_inlined_methods++;
6983 /* always add some code to avoid block split failures */
6984 MONO_INST_NEW (cfg, ins, OP_NOP);
6985 MONO_ADD_INS (prev_cbb, ins);
6987 prev_cbb->next_bb = sbblock;
6988 link_bblock (cfg, prev_cbb, sbblock);
6991 * Get rid of the begin and end bblocks if possible to aid local
6994 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6996 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6997 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6999 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
7000 MonoBasicBlock *prev = ebblock->in_bb [0];
7001 mono_merge_basic_blocks (cfg, prev, ebblock);
7003 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
7004 mono_merge_basic_blocks (cfg, prev_cbb, prev);
7005 cfg->cbb = prev_cbb;
7009 * Its possible that the rvar is set in some prev bblock, but not in others.
7015 for (i = 0; i < ebblock->in_count; ++i) {
7016 bb = ebblock->in_bb [i];
7018 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
7021 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7031 * If the inlined method contains only a throw, then the ret var is not
7032 * set, so set it to a dummy value.
7035 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
7037 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
7040 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: reset the exception state and discard the new bblocks */
7043 if (cfg->verbose_level > 2)
7044 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
7045 cfg->exception_type = MONO_EXCEPTION_NONE;
7046 mono_loader_clear_error ();
7048 /* This gets rid of the newly added bblocks */
7049 cfg->cbb = prev_cbb;
7051 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/*
 * Some of these comments may well be out-of-date.
 * Design decisions: we do a single pass over the IL code (and we do bblock
 * splitting/merging in the few cases when it's required: a back jump to an IL
 * address that was not already seen as bblock starting point).
 * Code is validated as we go (full verification is still better left to metadata/verify.c).
 * Complex operations are decomposed in simpler ones right away. We need to let the
 * arch-specific code peek and poke inside this process somehow (except when the
 * optimizations can take advantage of the full semantic info of coarse opcodes).
 * All the opcodes of the form opcode.s are 'normalized' to opcode.
 * MonoInst->opcode initially is the IL opcode or some simplification of that
 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
 * opcode with value bigger than OP_LAST.
 * At this point the IR can be handed over to an interpreter, a dumb code generator
 * or to the optimizing code generator that will translate it to SSA form.
 *
 * Profiling directed optimizations.
 * We may compile by default with few or no optimizations and instrument the code
 * or the user may indicate what methods to optimize the most either in a config file
 * or through repeated runs where the compiler applies offline the optimizations to
 * each method and then decides if it was worth it.
 */
/*
 * Verification helper macros used inside mono_method_to_ir (): they read
 * the locals of the enclosing function (sp, stack_start, ip, end, header,
 * num_args, cfg) and report failure through UNVERIFIED / TYPE_LOAD_ERROR
 * (presumably goto-style error labels -- confirm against their definitions
 * earlier in the file).
 */
7078 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7079 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7080 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7081 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7082 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7083 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7084 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7085 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7087 /* offset from br.s -> br like opcodes */
7088 #define BIG_BRANCH_OFFSET 13
7091 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7093 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7095 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream from START to END and create a bblock (GET_BBLOCK)
 * at every branch target and fall-through point, so mono_method_to_ir ()
 * sees all block boundaries up front.
 * NOTE(review): this extract is missing interleaved lines (non-contiguous
 * left-hand original line numbers, e.g. the per-case ip increments), so the
 * code is kept byte-for-byte; only annotating comments were added.
 */
7099 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7101 unsigned char *ip = start;
7102 unsigned char *target;
7105 MonoBasicBlock *bblock;
7106 const MonoOpcode *opcode;
7109 cli_addr = ip - start;
7110 i = mono_opcode_value ((const guint8 **)&ip, end);
7113 opcode = &mono_opcodes [i];
/* Advance ip by the operand size of each opcode kind */
7114 switch (opcode->argument) {
7115 case MonoInlineNone:
7118 case MonoInlineString:
7119 case MonoInlineType:
7120 case MonoInlineField:
7121 case MonoInlineMethod:
7124 case MonoShortInlineR:
7131 case MonoShortInlineVar:
7132 case MonoShortInlineI:
/* 1-byte relative branch: target plus the fall-through point get bblocks */
7135 case MonoShortInlineBrTarget:
7136 target = start + cli_addr + 2 + (signed char)ip [1];
7137 GET_BBLOCK (cfg, bblock, target);
7140 GET_BBLOCK (cfg, bblock, ip);
/* 4-byte relative branch */
7142 case MonoInlineBrTarget:
7143 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7144 GET_BBLOCK (cfg, bblock, target);
7147 GET_BBLOCK (cfg, bblock, ip);
/* switch: a count followed by N 4-byte relative targets */
7149 case MonoInlineSwitch: {
7150 guint32 n = read32 (ip + 1);
7153 cli_addr += 5 + 4 * n;
7154 target = start + cli_addr;
7155 GET_BBLOCK (cfg, bblock, target);
7157 for (j = 0; j < n; ++j) {
7158 target = start + cli_addr + (gint32)read32 (ip);
7159 GET_BBLOCK (cfg, bblock, target);
7169 g_assert_not_reached ();
/* Mark the bblock containing a throw as out-of-line (cold path) */
7172 if (i == CEE_THROW) {
7173 unsigned char *bb_start = ip - 1;
7175 /* Find the start of the bblock containing the throw */
7177 while ((bb_start >= start) && !bblock) {
7178 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7182 bblock->out_of_line = 1;
7192 static inline MonoMethod *
7193 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7197 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7198 method = mono_method_get_wrapper_data (m, token);
7201 method = mono_class_inflate_generic_method_checked (method, context, &error);
7202 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7205 method = mono_get_method_full (m->klass->image, token, klass, context);
7211 static inline MonoMethod *
7212 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7214 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7216 if (method && cfg && !cfg->gshared && mono_class_is_open_constructed_type (&method->klass->byval_arg))
7222 static inline MonoClass*
7223 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7228 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7229 klass = mono_method_get_wrapper_data (method, token);
7231 klass = mono_class_inflate_generic_class (klass, context);
7233 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7234 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7237 mono_class_init (klass);
7241 static inline MonoMethodSignature*
7242 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7244 MonoMethodSignature *fsig;
7246 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7249 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7251 fsig = mono_inflate_generic_signature (fsig, context, &error);
7253 g_assert (mono_error_ok (&error));
7256 fsig = mono_metadata_parse_signature (method->klass->image, token);
7262 throw_exception (void)
7264 static MonoMethod *method = NULL;
7267 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7268 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
7275 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7277 MonoMethod *thrower = throw_exception ();
7280 EMIT_NEW_PCONST (cfg, args [0], ex);
7281 mono_emit_method_call (cfg, thrower, args, NULL);
7285 * Return the original method is a wrapper is specified. We can only access
7286 * the custom attributes from the original method.
7289 get_original_method (MonoMethod *method)
7291 if (method->wrapper_type == MONO_WRAPPER_NONE)
7294 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7295 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7298 /* in other cases we need to find the original method */
7299 return mono_marshal_method_from_wrapper (method);
7303 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field)
7305 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7306 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7308 emit_throw_exception (cfg, ex);
7312 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7314 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7315 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7317 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 * NOTE(review): this extract is missing interleaved lines (non-contiguous
 * left-hand original line numbers, e.g. the element-size switch cases and
 * early returns), so the code is kept byte-for-byte; comments only.
 */
7321 * Check that the IL instructions at ip are the array initialization
7322 * sequence and return the pointer to the data and the size.
7325 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7328 * newarr[System.Int32]
7330 * ldtoken field valuetype ...
7331 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Match the dup / ldtoken <field> / call pattern described above */
7333 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7335 guint32 token = read32 (ip + 7);
7336 guint32 field_token = read32 (ip + 2);
7337 guint32 field_index = field_token & 0xffffff;
7339 const char *data_ptr;
7341 MonoMethod *cmethod;
7342 MonoClass *dummy_class;
7343 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7347 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7351 *out_field_token = field_token;
/* The call target must really be RuntimeHelpers::InitializeArray from corlib */
7353 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
7356 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7358 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7359 case MONO_TYPE_BOOLEAN:
7363 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7364 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7365 case MONO_TYPE_CHAR:
7382 if (size > mono_type_size (field->type, &dummy_align))
7385 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
/* Non-dynamic image: read the blob straight from the field's RVA */
7386 if (!image_is_dynamic (method->klass->image)) {
7387 field_index = read32 (ip + 2) & 0xffffff;
7388 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7389 data_ptr = mono_image_rva_map (method->klass->image, rva);
7390 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7391 /* for aot code we do the lookup on load */
7392 if (aot && data_ptr)
7393 return GUINT_TO_POINTER (rva);
7395 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7397 data_ptr = mono_field_get_data (field);
7405 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7407 char *method_fname = mono_method_full_name (method, TRUE);
7409 MonoMethodHeader *header = mono_method_get_header (method);
7411 if (header->code_size == 0)
7412 method_code = g_strdup ("method body is empty.");
7414 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7415 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7416 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7417 g_free (method_fname);
7418 g_free (method_code);
7419 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
7423 set_exception_object (MonoCompile *cfg, MonoException *exception)
7425 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7426 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7427 cfg->exception_ptr = exception;
7431 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7434 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7435 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7436 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7437 /* Optimize reg-reg moves away */
7439 * Can't optimize other opcodes, since sp[0] might point to
7440 * the last ins of a decomposed opcode.
7442 sp [0]->dreg = (cfg)->locals [n]->dreg;
7444 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7449 * ldloca inhibits many optimizations so try to get rid of it in common
7452 static inline unsigned char *
7453 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7463 local = read16 (ip + 2);
7467 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7468 /* From the INITOBJ case */
7469 token = read32 (ip + 2);
7470 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7471 CHECK_TYPELOAD (klass);
7472 type = mini_get_underlying_type (&klass->byval_arg);
7473 emit_init_local (cfg, local, type, TRUE);
7481 is_exception_class (MonoClass *class)
7484 if (class == mono_defaults.exception_class)
7486 class = class->parent;
/*
 * NOTE(review): this extract is missing interleaved lines (non-contiguous
 * left-hand original line numbers, e.g. the attribute-blob decoding steps),
 * so the code is kept byte-for-byte; only annotating comments were added.
 */
7492 * is_jit_optimizer_disabled:
7494 * Determine whenever M's assembly has a DebuggableAttribute with the
7495 * IsJITOptimizerDisabled flag set.
7498 is_jit_optimizer_disabled (MonoMethod *m)
7500 MonoAssembly *ass = m->klass->image->assembly;
7501 MonoCustomAttrInfo* attrs;
7502 static MonoClass *klass;
7504 gboolean val = FALSE;
/* Fast path: the result is cached per-assembly */
7507 if (ass->jit_optimizer_disabled_inited)
7508 return ass->jit_optimizer_disabled;
7511 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Publish the cached value before the inited flag (note the barrier) */
7514 ass->jit_optimizer_disabled = FALSE;
7515 mono_memory_barrier ();
7516 ass->jit_optimizer_disabled_inited = TRUE;
/* Search the assembly's custom attributes for DebuggableAttribute */
7520 attrs = mono_custom_attrs_from_assembly (ass);
7522 for (i = 0; i < attrs->num_attrs; ++i) {
7523 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7525 MonoMethodSignature *sig;
7527 if (!attr->ctor || attr->ctor->klass != klass)
7529 /* Decode the attribute. See reflection.c */
7530 p = (const char*)attr->data;
7531 g_assert (read16 (p) == 0x0001);
7534 // FIXME: Support named parameters
7535 sig = mono_method_signature (attr->ctor);
7536 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7538 /* Two boolean arguments */
7542 mono_custom_attrs_free (attrs);
7545 ass->jit_optimizer_disabled = val;
7546 mono_memory_barrier ();
7547 ass->jit_optimizer_disabled_inited = TRUE;
7553 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7555 gboolean supported_tail_call;
7558 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7559 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7561 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7564 for (i = 0; i < fsig->param_count; ++i) {
7565 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7566 /* These can point to the current method's stack */
7567 supported_tail_call = FALSE;
7569 if (fsig->hasthis && cmethod->klass->valuetype)
7570 /* this might point to the current method's stack */
7571 supported_tail_call = FALSE;
7572 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7573 supported_tail_call = FALSE;
7574 if (cfg->method->save_lmf)
7575 supported_tail_call = FALSE;
7576 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7577 supported_tail_call = FALSE;
7578 if (call_opcode != CEE_CALL)
7579 supported_tail_call = FALSE;
7581 /* Debugging support */
7583 if (supported_tail_call) {
7584 if (!mono_debug_count ())
7585 supported_tail_call = FALSE;
7589 return supported_tail_call;
7592 /* emits the code needed to access a managed tls var (like ThreadStatic)
7593 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7594 * pointer for the current thread.
7595 * Returns the MonoInst* representing the address of the tls var.
7598 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7601 int static_data_reg, array_reg, dreg;
7602 int offset2_reg, idx_reg;
7603 // inlined access to the tls data (see threads.c)
7604 static_data_reg = alloc_ireg (cfg);
7605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
7606 idx_reg = alloc_ireg (cfg);
7607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
7608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7609 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7610 array_reg = alloc_ireg (cfg);
7611 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
7612 offset2_reg = alloc_ireg (cfg);
7613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
7614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
7615 dreg = alloc_ireg (cfg);
7616 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
/*
 * handle_ctor_call:
 * NOTE(review): this extract is missing interleaved lines (non-contiguous
 * left-hand original line numbers, e.g. the error-exit paths), so the code
 * is kept byte-for-byte; only annotating comments were added.
 */
7623 * Handle calls made to ctors from NEWOBJ opcodes.
7626 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7627 MonoInst **sp, guint8 *ip, int *inline_costs)
7629 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
/* Shared generic valuetype ctors need an extra rgctx/vtable argument */
7631 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7632 mono_method_is_generic_sharable (cmethod, TRUE)) {
7633 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7634 mono_class_vtable (cfg->domain, cmethod->klass);
7635 CHECK_TYPELOAD (cmethod->klass);
7637 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7638 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7641 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7642 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7644 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7646 CHECK_TYPELOAD (cmethod->klass);
7647 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7652 /* Avoid virtual calls to ctors if possible */
7653 if (mono_class_is_marshalbyref (cmethod->klass))
7654 callvirt_this_arg = sp [0];
/* Try an intrinsic implementation of the ctor first */
7656 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7657 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7658 CHECK_CFG_EXCEPTION;
/* Then try inlining the ctor body (not for exception classes) */
7659 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7660 mono_method_check_inlining (cfg, cmethod) &&
7661 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7664 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE))) {
7665 cfg->real_offset += 5;
7667 *inline_costs += costs - 5;
7669 INLINE_FAILURE ("inline failure");
7670 // FIXME-VT: Clean this up
7671 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
7672 GSHAREDVT_FAILURE(*ip);
7673 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: call indirectly through the gsharedvt out trampoline */
7675 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
7678 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7679 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7680 } else if (context_used &&
7681 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7682 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7683 MonoInst *cmethod_addr;
7685 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7687 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7688 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7690 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Fallback: plain direct call to the ctor */
7692 INLINE_FAILURE ("ctor call");
7693 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7694 callvirt_this_arg, NULL, vtable_arg);
7701 * mono_method_to_ir:
7703 * Translate the .net IL into linear IR.
7706 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7707 MonoInst *return_var, MonoInst **inline_args,
7708 guint inline_offset, gboolean is_virtual_call)
7711 MonoInst *ins, **sp, **stack_start;
7712 MonoBasicBlock *tblock = NULL, *init_localsbb = NULL;
7713 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7714 MonoMethod *cmethod, *method_definition;
7715 MonoInst **arg_array;
7716 MonoMethodHeader *header;
7718 guint32 token, ins_flag;
7720 MonoClass *constrained_class = NULL;
7721 unsigned char *ip, *end, *target, *err_pos;
7722 MonoMethodSignature *sig;
7723 MonoGenericContext *generic_context = NULL;
7724 MonoGenericContainer *generic_container = NULL;
7725 MonoType **param_types;
7726 int i, n, start_new_bblock, dreg;
7727 int num_calls = 0, inline_costs = 0;
7728 int breakpoint_id = 0;
7730 GSList *class_inits = NULL;
7731 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7733 gboolean init_locals, seq_points, skip_dead_blocks;
7734 gboolean sym_seq_points = FALSE;
7735 MonoDebugMethodInfo *minfo;
7736 MonoBitSet *seq_point_locs = NULL;
7737 MonoBitSet *seq_point_set_locs = NULL;
7739 cfg->disable_inline = is_jit_optimizer_disabled (method);
7741 /* serialization and xdomain stuff may need access to private fields and methods */
7742 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7743 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7744 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7745 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7746 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7747 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7749 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7750 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7751 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7752 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7753 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7755 image = method->klass->image;
7756 header = mono_method_get_header (method);
7758 MonoLoaderError *error;
7760 if ((error = mono_loader_get_last_error ())) {
7761 mono_cfg_set_exception (cfg, error->exception_type);
7763 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7764 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7766 goto exception_exit;
7768 generic_container = mono_method_get_generic_container (method);
7769 sig = mono_method_signature (method);
7770 num_args = sig->hasthis + sig->param_count;
7771 ip = (unsigned char*)header->code;
7772 cfg->cil_start = ip;
7773 end = ip + header->code_size;
7774 cfg->stat_cil_code_size += header->code_size;
7776 seq_points = cfg->gen_seq_points && cfg->method == method;
7778 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7779 /* We could hit a seq point before attaching to the JIT (#8338) */
7783 if (cfg->gen_sdb_seq_points && cfg->method == method) {
7784 minfo = mono_debug_lookup_method (method);
7786 MonoSymSeqPoint *sps;
7787 int i, n_il_offsets;
7789 mono_debug_get_seq_points (minfo, NULL, NULL, NULL, &sps, &n_il_offsets);
7790 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7791 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7792 sym_seq_points = TRUE;
7793 for (i = 0; i < n_il_offsets; ++i) {
7794 if (sps [i].il_offset < header->code_size)
7795 mono_bitset_set_fast (seq_point_locs, sps [i].il_offset);
7798 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7799 /* Methods without line number info like auto-generated property accessors */
7800 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7801 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7802 sym_seq_points = TRUE;
7807 * Methods without init_locals set could cause asserts in various passes
7808 * (#497220). To work around this, we emit dummy initialization opcodes
7809 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7810 * on some platforms.
7812 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7813 init_locals = header->init_locals;
7817 method_definition = method;
7818 while (method_definition->is_inflated) {
7819 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7820 method_definition = imethod->declaring;
7823 /* SkipVerification is not allowed if core-clr is enabled */
7824 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7826 dont_verify_stloc = TRUE;
7829 if (sig->is_inflated)
7830 generic_context = mono_method_get_context (method);
7831 else if (generic_container)
7832 generic_context = &generic_container->context;
7833 cfg->generic_context = generic_context;
7836 g_assert (!sig->has_type_parameters);
7838 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7839 g_assert (method->is_inflated);
7840 g_assert (mono_method_get_context (method)->method_inst);
7842 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7843 g_assert (sig->generic_param_count);
7845 if (cfg->method == method) {
7846 cfg->real_offset = 0;
7848 cfg->real_offset = inline_offset;
7851 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7852 cfg->cil_offset_to_bb_len = header->code_size;
7854 cfg->current_method = method;
7856 if (cfg->verbose_level > 2)
7857 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7859 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7861 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7862 for (n = 0; n < sig->param_count; ++n)
7863 param_types [n + sig->hasthis] = sig->params [n];
7864 cfg->arg_types = param_types;
7866 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7867 if (cfg->method == method) {
7869 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7870 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7873 NEW_BBLOCK (cfg, start_bblock);
7874 cfg->bb_entry = start_bblock;
7875 start_bblock->cil_code = NULL;
7876 start_bblock->cil_length = 0;
7879 NEW_BBLOCK (cfg, end_bblock);
7880 cfg->bb_exit = end_bblock;
7881 end_bblock->cil_code = NULL;
7882 end_bblock->cil_length = 0;
7883 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7884 g_assert (cfg->num_bblocks == 2);
7886 arg_array = cfg->args;
7888 if (header->num_clauses) {
7889 cfg->spvars = g_hash_table_new (NULL, NULL);
7890 cfg->exvars = g_hash_table_new (NULL, NULL);
7892 /* handle exception clauses */
7893 for (i = 0; i < header->num_clauses; ++i) {
7894 MonoBasicBlock *try_bb;
7895 MonoExceptionClause *clause = &header->clauses [i];
7896 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7897 try_bb->real_offset = clause->try_offset;
7898 try_bb->try_start = TRUE;
7899 try_bb->region = ((i + 1) << 8) | clause->flags;
7900 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7901 tblock->real_offset = clause->handler_offset;
7902 tblock->flags |= BB_EXCEPTION_HANDLER;
7905 * Linking the try block with the EH block hinders inlining as we won't be able to
7906 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7908 if (COMPILE_LLVM (cfg))
7909 link_bblock (cfg, try_bb, tblock);
7911 if (*(ip + clause->handler_offset) == CEE_POP)
7912 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7914 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7915 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7916 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7917 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7918 MONO_ADD_INS (tblock, ins);
7920 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7921 /* finally clauses already have a seq point */
7922 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7923 MONO_ADD_INS (tblock, ins);
7926 /* todo: is a fault block unsafe to optimize? */
7927 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7928 tblock->flags |= BB_EXCEPTION_UNSAFE;
7931 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7933 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7935 /* catch and filter blocks get the exception object on the stack */
7936 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7937 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7939 /* mostly like handle_stack_args (), but just sets the input args */
7940 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7941 tblock->in_scount = 1;
7942 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7943 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7947 #ifdef MONO_CONTEXT_SET_LLVM_EXC_REG
7948 /* The EH code passes in the exception in a register to both JITted and LLVM compiled code */
7949 if (!cfg->compile_llvm) {
7950 MONO_INST_NEW (cfg, ins, OP_GET_EX_OBJ);
7951 ins->dreg = tblock->in_stack [0]->dreg;
7952 MONO_ADD_INS (tblock, ins);
7955 MonoInst *dummy_use;
7958 * Add a dummy use for the exvar so its liveness info will be
7961 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7964 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7965 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7966 tblock->flags |= BB_EXCEPTION_HANDLER;
7967 tblock->real_offset = clause->data.filter_offset;
7968 tblock->in_scount = 1;
7969 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7970 /* The filter block shares the exvar with the handler block */
7971 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7972 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7973 MONO_ADD_INS (tblock, ins);
7977 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7978 clause->data.catch_class &&
7980 mono_class_check_context_used (clause->data.catch_class)) {
7982 * In shared generic code with catch
7983 * clauses containing type variables
7984 * the exception handling code has to
7985 * be able to get to the rgctx.
7986 * Therefore we have to make sure that
7987 * the vtable/mrgctx argument (for
7988 * static or generic methods) or the
7989 * "this" argument (for non-static
7990 * methods) are live.
7992 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7993 mini_method_get_context (method)->method_inst ||
7994 method->klass->valuetype) {
7995 mono_get_vtable_var (cfg);
7997 MonoInst *dummy_use;
7999 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8004 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8005 cfg->cbb = start_bblock;
8006 cfg->args = arg_array;
8007 mono_save_args (cfg, sig, inline_args);
8010 /* FIRST CODE BLOCK */
8011 NEW_BBLOCK (cfg, tblock);
8012 tblock->cil_code = ip;
8016 ADD_BBLOCK (cfg, tblock);
8018 if (cfg->method == method) {
8019 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8020 if (breakpoint_id) {
8021 MONO_INST_NEW (cfg, ins, OP_BREAK);
8022 MONO_ADD_INS (cfg->cbb, ins);
8026 /* we use a separate basic block for the initialization code */
8027 NEW_BBLOCK (cfg, init_localsbb);
8028 cfg->bb_init = init_localsbb;
8029 init_localsbb->real_offset = cfg->real_offset;
8030 start_bblock->next_bb = init_localsbb;
8031 init_localsbb->next_bb = cfg->cbb;
8032 link_bblock (cfg, start_bblock, init_localsbb);
8033 link_bblock (cfg, init_localsbb, cfg->cbb);
8035 cfg->cbb = init_localsbb;
8037 if (cfg->gsharedvt && cfg->method == method) {
8038 MonoGSharedVtMethodInfo *info;
8039 MonoInst *var, *locals_var;
8042 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8043 info->method = cfg->method;
8044 info->count_entries = 16;
8045 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8046 cfg->gsharedvt_info = info;
8048 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8049 /* prevent it from being register allocated */
8050 //var->flags |= MONO_INST_VOLATILE;
8051 cfg->gsharedvt_info_var = var;
8053 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8054 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8056 /* Allocate locals */
8057 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8058 /* prevent it from being register allocated */
8059 //locals_var->flags |= MONO_INST_VOLATILE;
8060 cfg->gsharedvt_locals_var = locals_var;
8062 dreg = alloc_ireg (cfg);
8063 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8065 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8066 ins->dreg = locals_var->dreg;
8068 MONO_ADD_INS (cfg->cbb, ins);
8069 cfg->gsharedvt_locals_var_ins = ins;
8071 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8074 ins->flags |= MONO_INST_INIT;
8078 if (mono_security_core_clr_enabled ()) {
8079 /* check if this is native code, e.g. an icall or a p/invoke */
8080 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8081 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8083 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8084 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8086 /* if this is a native call then it can only be JITted from platform code */
8087 if ((icall || pinvk) && method->klass && method->klass->image) {
8088 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8089 MonoException *ex = icall ? mono_get_exception_security () :
8090 mono_get_exception_method_access ();
8091 emit_throw_exception (cfg, ex);
8098 CHECK_CFG_EXCEPTION;
8100 if (header->code_size == 0)
8103 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8108 if (cfg->method == method)
8109 mono_debug_init_method (cfg, cfg->cbb, breakpoint_id);
8111 for (n = 0; n < header->num_locals; ++n) {
8112 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8117 /* We force the vtable variable here for all shared methods
8118 for the possibility that they might show up in a stack
8119 trace where their exact instantiation is needed. */
8120 if (cfg->gshared && method == cfg->method) {
8121 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8122 mini_method_get_context (method)->method_inst ||
8123 method->klass->valuetype) {
8124 mono_get_vtable_var (cfg);
8126 /* FIXME: Is there a better way to do this?
8127 We need the variable live for the duration
8128 of the whole method. */
8129 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8133 /* add a check for this != NULL to inlined methods */
8134 if (is_virtual_call) {
8137 NEW_ARGLOAD (cfg, arg_ins, 0);
8138 MONO_ADD_INS (cfg->cbb, arg_ins);
8139 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8142 skip_dead_blocks = !dont_verify;
8143 if (skip_dead_blocks) {
8144 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8149 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8150 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8153 start_new_bblock = 0;
8155 if (cfg->method == method)
8156 cfg->real_offset = ip - header->code;
8158 cfg->real_offset = inline_offset;
8163 if (start_new_bblock) {
8164 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
8165 if (start_new_bblock == 2) {
8166 g_assert (ip == tblock->cil_code);
8168 GET_BBLOCK (cfg, tblock, ip);
8170 cfg->cbb->next_bb = tblock;
8172 start_new_bblock = 0;
8173 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8174 if (cfg->verbose_level > 3)
8175 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8176 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8180 g_slist_free (class_inits);
8183 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != cfg->cbb)) {
8184 link_bblock (cfg, cfg->cbb, tblock);
8185 if (sp != stack_start) {
8186 handle_stack_args (cfg, stack_start, sp - stack_start);
8188 CHECK_UNVERIFIABLE (cfg);
8190 cfg->cbb->next_bb = tblock;
8192 for (i = 0; i < cfg->cbb->in_scount; ++i) {
8193 if (cfg->verbose_level > 3)
8194 printf ("loading %d from temp %d\n", i, (int)cfg->cbb->in_stack [i]->inst_c0);
8195 EMIT_NEW_TEMPLOAD (cfg, ins, cfg->cbb->in_stack [i]->inst_c0);
8198 g_slist_free (class_inits);
8203 if (skip_dead_blocks) {
8204 int ip_offset = ip - header->code;
8206 if (ip_offset == bb->end)
8210 int op_size = mono_opcode_size (ip, end);
8211 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8213 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8215 if (ip_offset + op_size == bb->end) {
8216 MONO_INST_NEW (cfg, ins, OP_NOP);
8217 MONO_ADD_INS (cfg->cbb, ins);
8218 start_new_bblock = 1;
8226 * Sequence points are points where the debugger can place a breakpoint.
8227 * Currently, we generate these automatically at points where the IL
8230 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8232 * Make methods interruptable at the beginning, and at the targets of
8233 * backward branches.
8234 * Also, do this at the start of every bblock in methods with clauses too,
8235 * to be able to handle instructions with imprecise control flow like
8237 * Backward branches are handled at the end of method-to-ir ().
8239 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8240 gboolean sym_seq_point = sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code);
8242 /* Avoid sequence points on empty IL like .volatile */
8243 // FIXME: Enable this
8244 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8245 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8246 if ((sp != stack_start) && !sym_seq_point)
8247 ins->flags |= MONO_INST_NONEMPTY_STACK;
8248 MONO_ADD_INS (cfg->cbb, ins);
8251 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8254 cfg->cbb->real_offset = cfg->real_offset;
8256 if ((cfg->method == method) && cfg->coverage_info) {
8257 guint32 cil_offset = ip - header->code;
8258 cfg->coverage_info->data [cil_offset].cil_code = ip;
8260 /* TODO: Use an increment here */
8261 #if defined(TARGET_X86)
8262 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8263 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8265 MONO_ADD_INS (cfg->cbb, ins);
8267 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8268 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8272 if (cfg->verbose_level > 3)
8273 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8277 if (seq_points && !sym_seq_points && sp != stack_start) {
8279 * The C# compiler uses these nops to notify the JIT that it should
8280 * insert seq points.
8282 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8283 MONO_ADD_INS (cfg->cbb, ins);
8285 if (cfg->keep_cil_nops)
8286 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8288 MONO_INST_NEW (cfg, ins, OP_NOP);
8290 MONO_ADD_INS (cfg->cbb, ins);
8293 if (should_insert_brekpoint (cfg->method)) {
8294 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8296 MONO_INST_NEW (cfg, ins, OP_NOP);
8299 MONO_ADD_INS (cfg->cbb, ins);
8305 CHECK_STACK_OVF (1);
8306 n = (*ip)-CEE_LDARG_0;
8308 EMIT_NEW_ARGLOAD (cfg, ins, n);
8316 CHECK_STACK_OVF (1);
8317 n = (*ip)-CEE_LDLOC_0;
8319 EMIT_NEW_LOCLOAD (cfg, ins, n);
8328 n = (*ip)-CEE_STLOC_0;
8331 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8333 emit_stloc_ir (cfg, sp, header, n);
8340 CHECK_STACK_OVF (1);
8343 EMIT_NEW_ARGLOAD (cfg, ins, n);
8349 CHECK_STACK_OVF (1);
8352 NEW_ARGLOADA (cfg, ins, n);
8353 MONO_ADD_INS (cfg->cbb, ins);
8363 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8365 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8370 CHECK_STACK_OVF (1);
8373 EMIT_NEW_LOCLOAD (cfg, ins, n);
8377 case CEE_LDLOCA_S: {
8378 unsigned char *tmp_ip;
8380 CHECK_STACK_OVF (1);
8381 CHECK_LOCAL (ip [1]);
8383 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8389 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8398 CHECK_LOCAL (ip [1]);
8399 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8401 emit_stloc_ir (cfg, sp, header, ip [1]);
8406 CHECK_STACK_OVF (1);
8407 EMIT_NEW_PCONST (cfg, ins, NULL);
8408 ins->type = STACK_OBJ;
8413 CHECK_STACK_OVF (1);
8414 EMIT_NEW_ICONST (cfg, ins, -1);
8427 CHECK_STACK_OVF (1);
8428 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8434 CHECK_STACK_OVF (1);
8436 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8442 CHECK_STACK_OVF (1);
8443 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8449 CHECK_STACK_OVF (1);
8450 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8451 ins->type = STACK_I8;
8452 ins->dreg = alloc_dreg (cfg, STACK_I8);
8454 ins->inst_l = (gint64)read64 (ip);
8455 MONO_ADD_INS (cfg->cbb, ins);
8461 gboolean use_aotconst = FALSE;
8463 #ifdef TARGET_POWERPC
8464 /* FIXME: Clean this up */
8465 if (cfg->compile_aot)
8466 use_aotconst = TRUE;
8469 /* FIXME: we should really allocate this only late in the compilation process */
8470 f = mono_domain_alloc (cfg->domain, sizeof (float));
8472 CHECK_STACK_OVF (1);
8478 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8480 dreg = alloc_freg (cfg);
8481 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8482 ins->type = cfg->r4_stack_type;
8484 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8485 ins->type = cfg->r4_stack_type;
8486 ins->dreg = alloc_dreg (cfg, STACK_R8);
8488 MONO_ADD_INS (cfg->cbb, ins);
8498 gboolean use_aotconst = FALSE;
8500 #ifdef TARGET_POWERPC
8501 /* FIXME: Clean this up */
8502 if (cfg->compile_aot)
8503 use_aotconst = TRUE;
8506 /* FIXME: we should really allocate this only late in the compilation process */
8507 d = mono_domain_alloc (cfg->domain, sizeof (double));
8509 CHECK_STACK_OVF (1);
8515 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8517 dreg = alloc_freg (cfg);
8518 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8519 ins->type = STACK_R8;
8521 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8522 ins->type = STACK_R8;
8523 ins->dreg = alloc_dreg (cfg, STACK_R8);
8525 MONO_ADD_INS (cfg->cbb, ins);
8534 MonoInst *temp, *store;
8536 CHECK_STACK_OVF (1);
8540 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8541 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8543 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8546 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8559 if (sp [0]->type == STACK_R8)
8560 /* we need to pop the value from the x86 FP stack */
8561 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8567 INLINE_FAILURE ("jmp");
8568 GSHAREDVT_FAILURE (*ip);
8571 if (stack_start != sp)
8573 token = read32 (ip + 1);
8574 /* FIXME: check the signature matches */
8575 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8577 if (!cmethod || mono_loader_get_last_error ())
8580 if (cfg->gshared && mono_method_check_context_used (cmethod))
8581 GENERIC_SHARING_FAILURE (CEE_JMP);
8583 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8585 if (ARCH_HAVE_OP_TAIL_CALL) {
8586 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8589 /* Handle tail calls similarly to calls */
8590 n = fsig->param_count + fsig->hasthis;
8594 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8595 call->method = cmethod;
8596 call->tail_call = TRUE;
8597 call->signature = mono_method_signature (cmethod);
8598 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8599 call->inst.inst_p0 = cmethod;
8600 for (i = 0; i < n; ++i)
8601 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8603 mono_arch_emit_call (cfg, call);
8604 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8605 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
8607 for (i = 0; i < num_args; ++i)
8608 /* Prevent arguments from being optimized away */
8609 arg_array [i]->flags |= MONO_INST_VOLATILE;
8611 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8612 ins = (MonoInst*)call;
8613 ins->inst_p0 = cmethod;
8614 MONO_ADD_INS (cfg->cbb, ins);
8618 start_new_bblock = 1;
8623 MonoMethodSignature *fsig;
8626 token = read32 (ip + 1);
8630 //GSHAREDVT_FAILURE (*ip);
8635 fsig = mini_get_signature (method, token, generic_context);
8637 if (method->dynamic && fsig->pinvoke) {
8641 * This is a call through a function pointer using a pinvoke
8642 * signature. Have to create a wrapper and call that instead.
8643 * FIXME: This is very slow, need to create a wrapper at JIT time
8644 * instead based on the signature.
8646 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8647 EMIT_NEW_PCONST (cfg, args [1], fsig);
8649 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8652 n = fsig->param_count + fsig->hasthis;
8656 //g_assert (!virtual || fsig->hasthis);
8660 inline_costs += 10 * num_calls++;
8663 * Making generic calls out of gsharedvt methods.
8664 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8665 * patching gshared method addresses into a gsharedvt method.
8667 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig)) {
8669 * We pass the address to the gsharedvt trampoline in the rgctx reg
8671 MonoInst *callee = addr;
8673 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8675 GSHAREDVT_FAILURE (*ip);
8677 addr = emit_get_rgctx_sig (cfg, context_used,
8678 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8679 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8683 /* Prevent inlining of methods with indirect calls */
8684 INLINE_FAILURE ("indirect call");
8686 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8691 * Instead of emitting an indirect call, emit a direct call
8692 * with the contents of the aotconst as the patch info.
8694 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8695 info_type = addr->inst_c1;
8696 info_data = addr->inst_p0;
8698 info_type = addr->inst_right->inst_c1;
8699 info_data = addr->inst_right->inst_left;
8702 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8703 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8708 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8712 /* End of call, INS should contain the result of the call, if any */
8714 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8716 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8719 CHECK_CFG_EXCEPTION;
8723 constrained_class = NULL;
8727 case CEE_CALLVIRT: {
8728 MonoInst *addr = NULL;
8729 MonoMethodSignature *fsig = NULL;
8731 int virtual = *ip == CEE_CALLVIRT;
8732 gboolean pass_imt_from_rgctx = FALSE;
8733 MonoInst *imt_arg = NULL;
8734 MonoInst *keep_this_alive = NULL;
8735 gboolean pass_vtable = FALSE;
8736 gboolean pass_mrgctx = FALSE;
8737 MonoInst *vtable_arg = NULL;
8738 gboolean check_this = FALSE;
8739 gboolean supported_tail_call = FALSE;
8740 gboolean tail_call = FALSE;
8741 gboolean need_seq_point = FALSE;
8742 guint32 call_opcode = *ip;
8743 gboolean emit_widen = TRUE;
8744 gboolean push_res = TRUE;
8745 gboolean skip_ret = FALSE;
8746 gboolean delegate_invoke = FALSE;
8747 gboolean direct_icall = FALSE;
8748 gboolean constrained_partial_call = FALSE;
8749 MonoMethod *cil_method;
8752 token = read32 (ip + 1);
8756 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8757 cil_method = cmethod;
8759 if (constrained_class) {
8760 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8761 if (!mini_is_gsharedvt_klass (constrained_class)) {
8762 g_assert (!cmethod->klass->valuetype);
8763 if (!mini_type_is_reference (&constrained_class->byval_arg))
8764 constrained_partial_call = TRUE;
8768 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8769 if (cfg->verbose_level > 2)
8770 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8771 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8772 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8774 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8778 if (cfg->verbose_level > 2)
8779 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8781 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->gshared) {
8783 * This is needed since get_method_constrained can't find
8784 * the method in klass representing a type var.
8785 * The type var is guaranteed to be a reference type in this
8788 if (!mini_is_gsharedvt_klass (constrained_class))
8789 g_assert (!cmethod->klass->valuetype);
8791 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8797 if (!cmethod || mono_loader_get_last_error ())
8799 if (!dont_verify && !cfg->skip_visibility) {
8800 MonoMethod *target_method = cil_method;
8801 if (method->is_inflated) {
8802 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8804 if (!mono_method_can_access_method (method_definition, target_method) &&
8805 !mono_method_can_access_method (method, cil_method))
8806 METHOD_ACCESS_FAILURE (method, cil_method);
8809 if (mono_security_core_clr_enabled ())
8810 ensure_method_is_allowed_to_call_method (cfg, method, cil_method);
8812 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8813 /* MS.NET seems to silently convert this to a callvirt */
8818 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8819 * converts to a callvirt.
8821 * tests/bug-515884.il is an example of this behavior
8823 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8824 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8825 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8829 if (!cmethod->klass->inited)
8830 if (!mono_class_init (cmethod->klass))
8831 TYPE_LOAD_ERROR (cmethod->klass);
8833 fsig = mono_method_signature (cmethod);
8836 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8837 mini_class_is_system_array (cmethod->klass)) {
8838 array_rank = cmethod->klass->rank;
8839 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8840 direct_icall = TRUE;
8841 } else if (fsig->pinvoke) {
8842 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
8843 fsig = mono_method_signature (wrapper);
8844 } else if (constrained_class) {
8846 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8850 mono_save_token_info (cfg, image, token, cil_method);
8852 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8853 need_seq_point = TRUE;
8855 /* Don't support calls made using type arguments for now */
8857 if (cfg->gsharedvt) {
8858 if (mini_is_gsharedvt_signature (fsig))
8859 GSHAREDVT_FAILURE (*ip);
8863 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8864 g_assert_not_reached ();
8866 n = fsig->param_count + fsig->hasthis;
8868 if (!cfg->gshared && cmethod->klass->generic_container)
8872 g_assert (!mono_method_check_context_used (cmethod));
8876 //g_assert (!virtual || fsig->hasthis);
8880 if (constrained_class) {
8881 if (mini_is_gsharedvt_klass (constrained_class)) {
8882 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8883 /* The 'Own method' case below */
8884 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8885 /* 'The type parameter is instantiated as a reference type' case below. */
8887 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen);
8888 CHECK_CFG_EXCEPTION;
8895 * We have the `constrained.' prefix opcode.
8897 if (constrained_partial_call) {
8898 gboolean need_box = TRUE;
8901 * The receiver is a valuetype, but the exact type is not known at compile time. This means the
8902 * called method is not known at compile time either. The called method could end up being
8903 * one of the methods on the parent classes (object/valuetype/enum), in which case we need
8904 * to box the receiver.
8905 * A simple solution would be to box always and make a normal virtual call, but that would
8906 * be bad performance wise.
8908 if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE && cmethod->klass->generic_class) {
8910 * The parent classes implement no generic interfaces, so the called method will be a vtype method, so no boxing necessary.
8915 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8916 /* The called method is not virtual, i.e. Object:GetType (), the receiver is a vtype, has to box */
8917 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8918 ins->klass = constrained_class;
8919 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8920 CHECK_CFG_EXCEPTION;
8921 } else if (need_box) {
8923 MonoBasicBlock *is_ref_bb, *end_bb;
8924 MonoInst *nonbox_call;
8927 * Determine at runtime whether the called method is defined on object/valuetype/enum, and emit a boxing call
8929 * FIXME: It is possible to inline the called method in a lot of cases, i.e. for T_INT,
8930 * the no-box case goes to a method in Int32, while the box case goes to a method in Enum.
8932 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8934 NEW_BBLOCK (cfg, is_ref_bb);
8935 NEW_BBLOCK (cfg, end_bb);
8937 box_type = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_BOX_TYPE);
8938 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, box_type->dreg, 1);
8939 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
8942 nonbox_call = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8947 MONO_START_BB (cfg, is_ref_bb);
8948 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8949 ins->klass = constrained_class;
8950 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8951 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8953 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
8955 MONO_START_BB (cfg, end_bb);
8958 nonbox_call->dreg = ins->dreg;
8961 g_assert (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
8962 addr = emit_get_rgctx_virt_method (cfg, mono_class_check_context_used (constrained_class), constrained_class, cmethod, MONO_RGCTX_INFO_VIRT_METHOD_CODE);
8963 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8966 } else if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8968 * The type parameter is instantiated as a valuetype,
8969 * but that type doesn't override the method we're
8970 * calling, so we need to box `this'.
8972 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8973 ins->klass = constrained_class;
8974 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
8975 CHECK_CFG_EXCEPTION;
8976 } else if (!constrained_class->valuetype) {
8977 int dreg = alloc_ireg_ref (cfg);
8980 * The type parameter is instantiated as a reference
8981 * type. We have a managed pointer on the stack, so
8982 * we need to dereference it here.
8984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8985 ins->type = STACK_OBJ;
8988 if (cmethod->klass->valuetype) {
8991 /* Interface method */
8994 mono_class_setup_vtable (constrained_class);
8995 CHECK_TYPELOAD (constrained_class);
8996 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8998 TYPE_LOAD_ERROR (constrained_class);
8999 slot = mono_method_get_vtable_slot (cmethod);
9001 TYPE_LOAD_ERROR (cmethod->klass);
9002 cmethod = constrained_class->vtable [ioffset + slot];
9004 if (cmethod->klass == mono_defaults.enum_class) {
9005 /* Enum implements some interfaces, so treat this as the first case */
9006 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9007 ins->klass = constrained_class;
9008 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class));
9009 CHECK_CFG_EXCEPTION;
9014 constrained_class = NULL;
9017 if (check_call_signature (cfg, fsig, sp))
9020 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9021 delegate_invoke = TRUE;
9023 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9024 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9025 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9033 * If the callee is a shared method, then its static cctor
9034 * might not get called after the call was patched.
9036 if (cfg->gshared && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9037 emit_class_init (cfg, cmethod->klass);
9038 CHECK_TYPELOAD (cmethod->klass);
9041 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9044 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9046 context_used = mini_method_check_context_used (cfg, cmethod);
9048 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9049 /* Generic method interface
9050 calls are resolved via a
9051 helper function and don't
9053 if (!cmethod_context || !cmethod_context->method_inst)
9054 pass_imt_from_rgctx = TRUE;
9058 * If a shared method calls another
9059 * shared method then the caller must
9060 * have a generic sharing context
9061 * because the magic trampoline
9062 * requires it. FIXME: We shouldn't
9063 * have to force the vtable/mrgctx
9064 * variable here. Instead there
9065 * should be a flag in the cfg to
9066 * request a generic sharing context.
9069 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9070 mono_get_vtable_var (cfg);
9075 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9077 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9079 CHECK_TYPELOAD (cmethod->klass);
9080 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9085 g_assert (!vtable_arg);
9087 if (!cfg->compile_aot) {
9089 * emit_get_rgctx_method () calls mono_class_vtable () so check
9090 * for type load errors before.
9092 mono_class_setup_vtable (cmethod->klass);
9093 CHECK_TYPELOAD (cmethod->klass);
9096 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9098 /* !marshalbyref is needed to properly handle generic methods + remoting */
9099 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9100 MONO_METHOD_IS_FINAL (cmethod)) &&
9101 !mono_class_is_marshalbyref (cmethod->klass)) {
9108 if (pass_imt_from_rgctx) {
9109 g_assert (!pass_vtable);
9111 imt_arg = emit_get_rgctx_method (cfg, context_used,
9112 cmethod, MONO_RGCTX_INFO_METHOD);
9116 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9118 /* Calling virtual generic methods */
9119 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9120 !(MONO_METHOD_IS_FINAL (cmethod) &&
9121 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9122 fsig->generic_param_count &&
9123 !(cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))) {
9124 MonoInst *this_temp, *this_arg_temp, *store;
9125 MonoInst *iargs [4];
9126 gboolean use_imt = FALSE;
9128 g_assert (fsig->is_inflated);
9130 /* Prevent inlining of methods that contain indirect calls */
9131 INLINE_FAILURE ("virtual generic call");
9133 if (cfg->gsharedvt && mini_is_gsharedvt_signature (fsig))
9134 GSHAREDVT_FAILURE (*ip);
9136 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9137 if (cmethod->wrapper_type == MONO_WRAPPER_NONE)
9142 g_assert (!imt_arg);
9144 g_assert (cmethod->is_inflated);
9145 imt_arg = emit_get_rgctx_method (cfg, context_used,
9146 cmethod, MONO_RGCTX_INFO_METHOD);
9147 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9149 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9150 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9151 MONO_ADD_INS (cfg->cbb, store);
9153 /* FIXME: This should be a managed pointer */
9154 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9156 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9157 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9158 cmethod, MONO_RGCTX_INFO_METHOD);
9159 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9160 addr = mono_emit_jit_icall (cfg,
9161 mono_helper_compile_generic_method, iargs);
9163 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9165 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9172 * Implement a workaround for the inherent races involved in locking:
9178 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9179 * try block, the Exit () won't be executed, see:
9180 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9181 * To work around this, we extend such try blocks to include the last x bytes
9182 * of the Monitor.Enter () call.
9184 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9185 MonoBasicBlock *tbb;
9187 GET_BBLOCK (cfg, tbb, ip + 5);
9189 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9190 * from Monitor.Enter like ArgumentNullException.
9192 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9193 /* Mark this bblock as needing to be extended */
9194 tbb->extend_try_block = TRUE;
9198 /* Conversion to a JIT intrinsic */
9199 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9200 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9201 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9208 if ((cfg->opt & MONO_OPT_INLINE) &&
9209 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9210 mono_method_check_inlining (cfg, cmethod)) {
9212 gboolean always = FALSE;
9214 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9215 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9216 /* Prevent inlining of methods that call wrappers */
9217 INLINE_FAILURE ("wrapper call");
9218 cmethod = mono_marshal_get_native_wrapper (cmethod, TRUE, FALSE);
9222 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always);
9224 cfg->real_offset += 5;
9226 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9227 /* *sp is already set by inline_method */
9232 inline_costs += costs;
9238 /* Tail recursion elimination */
9239 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9240 gboolean has_vtargs = FALSE;
9243 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9244 INLINE_FAILURE ("tail call");
9246 /* keep it simple */
9247 for (i = fsig->param_count - 1; i >= 0; i--) {
9248 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9253 for (i = 0; i < n; ++i)
9254 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9255 MONO_INST_NEW (cfg, ins, OP_BR);
9256 MONO_ADD_INS (cfg->cbb, ins);
9257 tblock = start_bblock->out_bb [0];
9258 link_bblock (cfg, cfg->cbb, tblock);
9259 ins->inst_target_bb = tblock;
9260 start_new_bblock = 1;
9262 /* skip the CEE_RET, too */
9263 if (ip_in_bb (cfg, cfg->cbb, ip + 5))
9270 inline_costs += 10 * num_calls++;
9273 * Making generic calls out of gsharedvt methods.
9274 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9275 * patching gshared method addresses into a gsharedvt method.
9277 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9278 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9279 MonoRgctxInfoType info_type;
9282 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9283 //GSHAREDVT_FAILURE (*ip);
9284 // disable for possible remoting calls
9285 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9286 GSHAREDVT_FAILURE (*ip);
9287 if (fsig->generic_param_count) {
9288 /* virtual generic call */
9289 g_assert (!imt_arg);
9290 /* Same as the virtual generic case above */
9291 imt_arg = emit_get_rgctx_method (cfg, context_used,
9292 cmethod, MONO_RGCTX_INFO_METHOD);
9293 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9295 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9296 /* This can happen when we call a fully instantiated iface method */
9297 imt_arg = emit_get_rgctx_method (cfg, context_used,
9298 cmethod, MONO_RGCTX_INFO_METHOD);
9303 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9304 keep_this_alive = sp [0];
9306 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9307 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9309 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9310 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9312 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9316 /* Generic sharing */
9319 * Use this if the callee is gsharedvt sharable too, since
9320 * at runtime we might find an instantiation so the call cannot
9321 * be patched (the 'no_patch' code path in mini-trampolines.c).
9323 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9324 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9325 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9326 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9327 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9328 INLINE_FAILURE ("gshared");
9330 g_assert (cfg->gshared && cmethod);
9334 * We are compiling a call to a
9335 * generic method from shared code,
9336 * which means that we have to look up
9337 * the method in the rgctx and do an
9341 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9343 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9344 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9348 /* Direct calls to icalls */
9350 MonoMethod *wrapper;
9353 /* Inline the wrapper */
9354 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9356 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE);
9357 g_assert (costs > 0);
9358 cfg->real_offset += 5;
9360 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9361 /* *sp is already set by inline_method */
9366 inline_costs += costs;
9375 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9376 MonoInst *val = sp [fsig->param_count];
9378 if (val->type == STACK_OBJ) {
9379 MonoInst *iargs [2];
9384 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9387 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9388 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9389 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9390 emit_write_barrier (cfg, addr, val);
9391 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cmethod->klass))
9392 GSHAREDVT_FAILURE (*ip);
9393 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9394 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9396 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9397 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9398 if (!cmethod->klass->element_class->valuetype && !readonly)
9399 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9400 CHECK_TYPELOAD (cmethod->klass);
9403 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9406 g_assert_not_reached ();
9413 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9417 /* Tail prefix / tail call optimization */
9419 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9420 /* FIXME: runtime generic context pointer for jumps? */
9421 /* FIXME: handle this for generic sharing eventually */
9422 if ((ins_flag & MONO_INST_TAILCALL) &&
9423 !vtable_arg && !cfg->gshared && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9424 supported_tail_call = TRUE;
9426 if (supported_tail_call) {
9429 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9430 INLINE_FAILURE ("tail call");
9432 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9434 if (ARCH_HAVE_OP_TAIL_CALL) {
9435 /* Handle tail calls similarly to normal calls */
9438 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9440 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9441 call->tail_call = TRUE;
9442 call->method = cmethod;
9443 call->signature = mono_method_signature (cmethod);
9446 * We implement tail calls by storing the actual arguments into the
9447 * argument variables, then emitting a CEE_JMP.
9449 for (i = 0; i < n; ++i) {
9450 /* Prevent argument from being register allocated */
9451 arg_array [i]->flags |= MONO_INST_VOLATILE;
9452 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9454 ins = (MonoInst*)call;
9455 ins->inst_p0 = cmethod;
9456 ins->inst_p1 = arg_array [0];
9457 MONO_ADD_INS (cfg->cbb, ins);
9458 link_bblock (cfg, cfg->cbb, end_bblock);
9459 start_new_bblock = 1;
9461 // FIXME: Eliminate unreachable epilogs
9464 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9465 * only reachable from this call.
9467 GET_BBLOCK (cfg, tblock, ip + 5);
9468 if (tblock == cfg->cbb || tblock->in_count == 0)
9477 * Synchronized wrappers.
9478 * Its hard to determine where to replace a method with its synchronized
9479 * wrapper without causing an infinite recursion. The current solution is
9480 * to add the synchronized wrapper in the trampolines, and to
9481 * change the called method to a dummy wrapper, and resolve that wrapper
9482 * to the real method in mono_jit_compile_method ().
9484 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9485 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9486 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9487 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9491 INLINE_FAILURE ("call");
9492 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9493 imt_arg, vtable_arg);
9496 link_bblock (cfg, cfg->cbb, end_bblock);
9497 start_new_bblock = 1;
9499 // FIXME: Eliminate unreachable epilogs
9502 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9503 * only reachable from this call.
9505 GET_BBLOCK (cfg, tblock, ip + 5);
9506 if (tblock == cfg->cbb || tblock->in_count == 0)
9513 /* End of call, INS should contain the result of the call, if any */
9515 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9518 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9523 if (keep_this_alive) {
9524 MonoInst *dummy_use;
9526 /* See mono_emit_method_call_full () */
9527 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9530 CHECK_CFG_EXCEPTION;
9534 g_assert (*ip == CEE_RET);
9538 constrained_class = NULL;
9540 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9544 if (cfg->method != method) {
9545 /* return from inlined method */
9547 * If in_count == 0, that means the ret is unreachable due to
9548 * being preceded by a throw. In that case, inline_method () will
9549 * handle setting the return value
9550 * (test case: test_0_inline_throw ()).
9552 if (return_var && cfg->cbb->in_count) {
9553 MonoType *ret_type = mono_method_signature (method)->ret;
9559 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9562 //g_assert (returnvar != -1);
9563 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9564 cfg->ret_var_set = TRUE;
9567 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9569 if (cfg->lmf_var && cfg->cbb->in_count)
9573 MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
9575 if (seq_points && !sym_seq_points) {
9577 * Place a seq point here too even though the IL stack is not
9578 * empty, so a step over on
9581 * will work correctly.
9583 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9584 MONO_ADD_INS (cfg->cbb, ins);
9587 g_assert (!return_var);
9591 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9594 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9597 if (!cfg->vret_addr) {
9600 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9602 EMIT_NEW_RETLOADA (cfg, ret_addr);
9604 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9605 ins->klass = mono_class_from_mono_type (ret_type);
9608 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9609 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9610 MonoInst *iargs [1];
9614 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9615 mono_arch_emit_setret (cfg, method, conv);
9617 mono_arch_emit_setret (cfg, method, *sp);
9620 mono_arch_emit_setret (cfg, method, *sp);
9625 if (sp != stack_start)
9627 MONO_INST_NEW (cfg, ins, OP_BR);
9629 ins->inst_target_bb = end_bblock;
9630 MONO_ADD_INS (cfg->cbb, ins);
9631 link_bblock (cfg, cfg->cbb, end_bblock);
9632 start_new_bblock = 1;
9636 MONO_INST_NEW (cfg, ins, OP_BR);
9638 target = ip + 1 + (signed char)(*ip);
9640 GET_BBLOCK (cfg, tblock, target);
9641 link_bblock (cfg, cfg->cbb, tblock);
9642 ins->inst_target_bb = tblock;
9643 if (sp != stack_start) {
9644 handle_stack_args (cfg, stack_start, sp - stack_start);
9646 CHECK_UNVERIFIABLE (cfg);
9648 MONO_ADD_INS (cfg->cbb, ins);
9649 start_new_bblock = 1;
9650 inline_costs += BRANCH_COST;
9664 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9666 target = ip + 1 + *(signed char*)ip;
9672 inline_costs += BRANCH_COST;
9676 MONO_INST_NEW (cfg, ins, OP_BR);
9679 target = ip + 4 + (gint32)read32(ip);
9681 GET_BBLOCK (cfg, tblock, target);
9682 link_bblock (cfg, cfg->cbb, tblock);
9683 ins->inst_target_bb = tblock;
9684 if (sp != stack_start) {
9685 handle_stack_args (cfg, stack_start, sp - stack_start);
9687 CHECK_UNVERIFIABLE (cfg);
9690 MONO_ADD_INS (cfg->cbb, ins);
9692 start_new_bblock = 1;
9693 inline_costs += BRANCH_COST;
9700 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9701 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9702 guint32 opsize = is_short ? 1 : 4;
9704 CHECK_OPSIZE (opsize);
9706 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9709 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9714 GET_BBLOCK (cfg, tblock, target);
9715 link_bblock (cfg, cfg->cbb, tblock);
9716 GET_BBLOCK (cfg, tblock, ip);
9717 link_bblock (cfg, cfg->cbb, tblock);
9719 if (sp != stack_start) {
9720 handle_stack_args (cfg, stack_start, sp - stack_start);
9721 CHECK_UNVERIFIABLE (cfg);
9724 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9725 cmp->sreg1 = sp [0]->dreg;
9726 type_from_op (cfg, cmp, sp [0], NULL);
9729 #if SIZEOF_REGISTER == 4
9730 if (cmp->opcode == OP_LCOMPARE_IMM) {
9731 /* Convert it to OP_LCOMPARE */
9732 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9733 ins->type = STACK_I8;
9734 ins->dreg = alloc_dreg (cfg, STACK_I8);
9736 MONO_ADD_INS (cfg->cbb, ins);
9737 cmp->opcode = OP_LCOMPARE;
9738 cmp->sreg2 = ins->dreg;
9741 MONO_ADD_INS (cfg->cbb, cmp);
9743 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9744 type_from_op (cfg, ins, sp [0], NULL);
9745 MONO_ADD_INS (cfg->cbb, ins);
9746 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9747 GET_BBLOCK (cfg, tblock, target);
9748 ins->inst_true_bb = tblock;
9749 GET_BBLOCK (cfg, tblock, ip);
9750 ins->inst_false_bb = tblock;
9751 start_new_bblock = 2;
9754 inline_costs += BRANCH_COST;
9769 MONO_INST_NEW (cfg, ins, *ip);
9771 target = ip + 4 + (gint32)read32(ip);
9777 inline_costs += BRANCH_COST;
9781 MonoBasicBlock **targets;
9782 MonoBasicBlock *default_bblock;
9783 MonoJumpInfoBBTable *table;
9784 int offset_reg = alloc_preg (cfg);
9785 int target_reg = alloc_preg (cfg);
9786 int table_reg = alloc_preg (cfg);
9787 int sum_reg = alloc_preg (cfg);
9788 gboolean use_op_switch;
9792 n = read32 (ip + 1);
9795 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9799 CHECK_OPSIZE (n * sizeof (guint32));
9800 target = ip + n * sizeof (guint32);
9802 GET_BBLOCK (cfg, default_bblock, target);
9803 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9805 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9806 for (i = 0; i < n; ++i) {
9807 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9808 targets [i] = tblock;
9809 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9813 if (sp != stack_start) {
9815 * Link the current bb with the targets as well, so handle_stack_args
9816 * will set their in_stack correctly.
9818 link_bblock (cfg, cfg->cbb, default_bblock);
9819 for (i = 0; i < n; ++i)
9820 link_bblock (cfg, cfg->cbb, targets [i]);
9822 handle_stack_args (cfg, stack_start, sp - stack_start);
9824 CHECK_UNVERIFIABLE (cfg);
9827 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9828 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9830 for (i = 0; i < n; ++i)
9831 link_bblock (cfg, cfg->cbb, targets [i]);
9833 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9834 table->table = targets;
9835 table->table_size = n;
9837 use_op_switch = FALSE;
9839 /* ARM implements SWITCH statements differently */
9840 /* FIXME: Make it use the generic implementation */
9841 if (!cfg->compile_aot)
9842 use_op_switch = TRUE;
9845 if (COMPILE_LLVM (cfg))
9846 use_op_switch = TRUE;
9848 cfg->cbb->has_jump_table = 1;
9850 if (use_op_switch) {
9851 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9852 ins->sreg1 = src1->dreg;
9853 ins->inst_p0 = table;
9854 ins->inst_many_bb = targets;
9855 ins->klass = GUINT_TO_POINTER (n);
9856 MONO_ADD_INS (cfg->cbb, ins);
9858 if (sizeof (gpointer) == 8)
9859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9863 #if SIZEOF_REGISTER == 8
9864 /* The upper word might not be zero, and we add it to a 64 bit address later */
9865 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9868 if (cfg->compile_aot) {
9869 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9871 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9872 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9873 ins->inst_p0 = table;
9874 ins->dreg = table_reg;
9875 MONO_ADD_INS (cfg->cbb, ins);
9878 /* FIXME: Use load_memindex */
9879 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9881 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9883 start_new_bblock = 1;
9884 inline_costs += (BRANCH_COST * 2);
9904 dreg = alloc_freg (cfg);
9907 dreg = alloc_lreg (cfg);
9910 dreg = alloc_ireg_ref (cfg);
9913 dreg = alloc_preg (cfg);
9916 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9917 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9918 if (*ip == CEE_LDIND_R4)
9919 ins->type = cfg->r4_stack_type;
9920 ins->flags |= ins_flag;
9921 MONO_ADD_INS (cfg->cbb, ins);
9923 if (ins_flag & MONO_INST_VOLATILE) {
9924 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9925 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9941 if (ins_flag & MONO_INST_VOLATILE) {
9942 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9943 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9946 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9947 ins->flags |= ins_flag;
9950 MONO_ADD_INS (cfg->cbb, ins);
9952 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9953 emit_write_barrier (cfg, sp [0], sp [1]);
9962 MONO_INST_NEW (cfg, ins, (*ip));
9964 ins->sreg1 = sp [0]->dreg;
9965 ins->sreg2 = sp [1]->dreg;
9966 type_from_op (cfg, ins, sp [0], sp [1]);
9968 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9970 /* Use the immediate opcodes if possible */
9971 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9972 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9973 if (imm_opcode != -1) {
9974 ins->opcode = imm_opcode;
9975 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9978 NULLIFY_INS (sp [1]);
9982 MONO_ADD_INS ((cfg)->cbb, (ins));
9984 *sp++ = mono_decompose_opcode (cfg, ins);
10001 MONO_INST_NEW (cfg, ins, (*ip));
10003 ins->sreg1 = sp [0]->dreg;
10004 ins->sreg2 = sp [1]->dreg;
10005 type_from_op (cfg, ins, sp [0], sp [1]);
10007 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10008 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10010 /* FIXME: Pass opcode to is_inst_imm */
10012 /* Use the immediate opcodes if possible */
10013 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10016 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10017 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10018 /* Keep emulated opcodes which are optimized away later */
10019 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10020 imm_opcode = mono_op_to_op_imm (ins->opcode);
10023 if (imm_opcode != -1) {
10024 ins->opcode = imm_opcode;
10025 if (sp [1]->opcode == OP_I8CONST) {
10026 #if SIZEOF_REGISTER == 8
10027 ins->inst_imm = sp [1]->inst_l;
10029 ins->inst_ls_word = sp [1]->inst_ls_word;
10030 ins->inst_ms_word = sp [1]->inst_ms_word;
10034 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10037 /* Might be followed by an instruction added by add_widen_op */
10038 if (sp [1]->next == NULL)
10039 NULLIFY_INS (sp [1]);
10042 MONO_ADD_INS ((cfg)->cbb, (ins));
10044 *sp++ = mono_decompose_opcode (cfg, ins);
10057 case CEE_CONV_OVF_I8:
10058 case CEE_CONV_OVF_U8:
10059 case CEE_CONV_R_UN:
10062 /* Special case this earlier so we have long constants in the IR */
10063 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10064 int data = sp [-1]->inst_c0;
10065 sp [-1]->opcode = OP_I8CONST;
10066 sp [-1]->type = STACK_I8;
10067 #if SIZEOF_REGISTER == 8
10068 if ((*ip) == CEE_CONV_U8)
10069 sp [-1]->inst_c0 = (guint32)data;
10071 sp [-1]->inst_c0 = data;
10073 sp [-1]->inst_ls_word = data;
10074 if ((*ip) == CEE_CONV_U8)
10075 sp [-1]->inst_ms_word = 0;
10077 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10079 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10086 case CEE_CONV_OVF_I4:
10087 case CEE_CONV_OVF_I1:
10088 case CEE_CONV_OVF_I2:
10089 case CEE_CONV_OVF_I:
10090 case CEE_CONV_OVF_U:
10093 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10094 ADD_UNOP (CEE_CONV_OVF_I8);
10101 case CEE_CONV_OVF_U1:
10102 case CEE_CONV_OVF_U2:
10103 case CEE_CONV_OVF_U4:
10106 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10107 ADD_UNOP (CEE_CONV_OVF_U8);
10114 case CEE_CONV_OVF_I1_UN:
10115 case CEE_CONV_OVF_I2_UN:
10116 case CEE_CONV_OVF_I4_UN:
10117 case CEE_CONV_OVF_I8_UN:
10118 case CEE_CONV_OVF_U1_UN:
10119 case CEE_CONV_OVF_U2_UN:
10120 case CEE_CONV_OVF_U4_UN:
10121 case CEE_CONV_OVF_U8_UN:
10122 case CEE_CONV_OVF_I_UN:
10123 case CEE_CONV_OVF_U_UN:
10130 CHECK_CFG_EXCEPTION;
10134 case CEE_ADD_OVF_UN:
10136 case CEE_MUL_OVF_UN:
10138 case CEE_SUB_OVF_UN:
10144 GSHAREDVT_FAILURE (*ip);
10147 token = read32 (ip + 1);
10148 klass = mini_get_class (method, token, generic_context);
10149 CHECK_TYPELOAD (klass);
10151 if (generic_class_is_reference_type (cfg, klass)) {
10152 MonoInst *store, *load;
10153 int dreg = alloc_ireg_ref (cfg);
10155 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10156 load->flags |= ins_flag;
10157 MONO_ADD_INS (cfg->cbb, load);
10159 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10160 store->flags |= ins_flag;
10161 MONO_ADD_INS (cfg->cbb, store);
10163 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10164 emit_write_barrier (cfg, sp [0], sp [1]);
10166 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10172 int loc_index = -1;
10178 token = read32 (ip + 1);
10179 klass = mini_get_class (method, token, generic_context);
10180 CHECK_TYPELOAD (klass);
10182 /* Optimize the common ldobj+stloc combination */
10185 loc_index = ip [6];
10192 loc_index = ip [5] - CEE_STLOC_0;
10199 if ((loc_index != -1) && ip_in_bb (cfg, cfg->cbb, ip + 5)) {
10200 CHECK_LOCAL (loc_index);
10202 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10203 ins->dreg = cfg->locals [loc_index]->dreg;
10204 ins->flags |= ins_flag;
10207 if (ins_flag & MONO_INST_VOLATILE) {
10208 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10209 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10215 /* Optimize the ldobj+stobj combination */
10216 /* The reference case ends up being a load+store anyway */
10217 /* Skip this if the operation is volatile. */
10218 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10223 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10230 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10231 ins->flags |= ins_flag;
10234 if (ins_flag & MONO_INST_VOLATILE) {
10235 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10236 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10245 CHECK_STACK_OVF (1);
10247 n = read32 (ip + 1);
10249 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10250 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10251 ins->type = STACK_OBJ;
10254 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10255 MonoInst *iargs [1];
10256 char *str = mono_method_get_wrapper_data (method, n);
10258 if (cfg->compile_aot)
10259 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10261 EMIT_NEW_PCONST (cfg, iargs [0], str);
10262 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10264 if (cfg->opt & MONO_OPT_SHARED) {
10265 MonoInst *iargs [3];
10267 if (cfg->compile_aot) {
10268 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10270 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10271 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10272 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10273 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10274 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10276 if (cfg->cbb->out_of_line) {
10277 MonoInst *iargs [2];
10279 if (image == mono_defaults.corlib) {
10281 * Avoid relocations in AOT and save some space by using a
10282 * version of helper_ldstr specialized to mscorlib.
10284 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10285 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10287 /* Avoid creating the string object */
10288 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10289 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10290 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10294 if (cfg->compile_aot) {
10295 NEW_LDSTRCONST (cfg, ins, image, n);
10297 MONO_ADD_INS (cfg->cbb, ins);
10300 NEW_PCONST (cfg, ins, NULL);
10301 ins->type = STACK_OBJ;
10302 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10304 OUT_OF_MEMORY_FAILURE;
10307 MONO_ADD_INS (cfg->cbb, ins);
10316 MonoInst *iargs [2];
10317 MonoMethodSignature *fsig;
10320 MonoInst *vtable_arg = NULL;
10323 token = read32 (ip + 1);
10324 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10325 if (!cmethod || mono_loader_get_last_error ())
10327 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10330 mono_save_token_info (cfg, image, token, cmethod);
10332 if (!mono_class_init (cmethod->klass))
10333 TYPE_LOAD_ERROR (cmethod->klass);
10335 context_used = mini_method_check_context_used (cfg, cmethod);
10337 if (mono_security_core_clr_enabled ())
10338 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
10340 if (cfg->gshared && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10341 emit_class_init (cfg, cmethod->klass);
10342 CHECK_TYPELOAD (cmethod->klass);
10346 if (cfg->gsharedvt) {
10347 if (mini_is_gsharedvt_variable_signature (sig))
10348 GSHAREDVT_FAILURE (*ip);
10352 n = fsig->param_count;
10356 * Generate smaller code for the common newobj <exception> instruction in
10357 * argument checking code.
10359 if (cfg->cbb->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10360 is_exception_class (cmethod->klass) && n <= 2 &&
10361 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10362 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10363 MonoInst *iargs [3];
10367 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10370 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10373 iargs [1] = sp [0];
10374 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10377 iargs [1] = sp [0];
10378 iargs [2] = sp [1];
10379 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10382 g_assert_not_reached ();
10390 /* move the args to allow room for 'this' in the first position */
10396 /* check_call_signature () requires sp[0] to be set */
10397 this_ins.type = STACK_OBJ;
10398 sp [0] = &this_ins;
10399 if (check_call_signature (cfg, fsig, sp))
10404 if (mini_class_is_system_array (cmethod->klass)) {
10405 *sp = emit_get_rgctx_method (cfg, context_used,
10406 cmethod, MONO_RGCTX_INFO_METHOD);
10408 /* Avoid varargs in the common case */
10409 if (fsig->param_count == 1)
10410 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10411 else if (fsig->param_count == 2)
10412 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10413 else if (fsig->param_count == 3)
10414 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10415 else if (fsig->param_count == 4)
10416 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10418 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10419 } else if (cmethod->string_ctor) {
10420 g_assert (!context_used);
10421 g_assert (!vtable_arg);
10422 /* we simply pass a null pointer */
10423 EMIT_NEW_PCONST (cfg, *sp, NULL);
10424 /* now call the string ctor */
10425 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10427 if (cmethod->klass->valuetype) {
10428 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10429 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10430 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10435 * The code generated by mini_emit_virtual_call () expects
10436 * iargs [0] to be a boxed instance, but luckily the vcall
10437 * will be transformed into a normal call there.
10439 } else if (context_used) {
10440 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10443 MonoVTable *vtable = NULL;
10445 if (!cfg->compile_aot)
10446 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10447 CHECK_TYPELOAD (cmethod->klass);
10450 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10451 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10452 * As a workaround, we call class cctors before allocating objects.
10454 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10455 emit_class_init (cfg, cmethod->klass);
10456 if (cfg->verbose_level > 2)
10457 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10458 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10461 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10464 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10467 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10469 /* Now call the actual ctor */
10470 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &inline_costs);
10471 CHECK_CFG_EXCEPTION;
10474 if (alloc == NULL) {
10476 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10477 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10485 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10486 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10489 case CEE_CASTCLASS:
10493 token = read32 (ip + 1);
10494 klass = mini_get_class (method, token, generic_context);
10495 CHECK_TYPELOAD (klass);
10496 if (sp [0]->type != STACK_OBJ)
10499 ins = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10500 CHECK_CFG_EXCEPTION;
10509 token = read32 (ip + 1);
10510 klass = mini_get_class (method, token, generic_context);
10511 CHECK_TYPELOAD (klass);
10512 if (sp [0]->type != STACK_OBJ)
10515 context_used = mini_class_check_context_used (cfg, klass);
10517 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10518 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10519 MonoInst *args [3];
10526 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10529 if (cfg->compile_aot) {
10530 idx = get_castclass_cache_idx (cfg);
10531 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10533 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10536 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10539 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10540 MonoMethod *mono_isinst;
10541 MonoInst *iargs [1];
10544 mono_isinst = mono_marshal_get_isinst (klass);
10545 iargs [0] = sp [0];
10547 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10548 iargs, ip, cfg->real_offset, TRUE);
10549 CHECK_CFG_EXCEPTION;
10550 g_assert (costs > 0);
10553 cfg->real_offset += 5;
10557 inline_costs += costs;
10560 ins = handle_isinst (cfg, klass, *sp, context_used);
10561 CHECK_CFG_EXCEPTION;
10567 case CEE_UNBOX_ANY: {
10568 MonoInst *res, *addr;
10573 token = read32 (ip + 1);
10574 klass = mini_get_class (method, token, generic_context);
10575 CHECK_TYPELOAD (klass);
10577 mono_save_token_info (cfg, image, token, klass);
10579 context_used = mini_class_check_context_used (cfg, klass);
10581 if (mini_is_gsharedvt_klass (klass)) {
10582 res = handle_unbox_gsharedvt (cfg, klass, *sp);
10584 } else if (generic_class_is_reference_type (cfg, klass)) {
10585 res = handle_castclass (cfg, klass, *sp, ip, &inline_costs);
10586 CHECK_CFG_EXCEPTION;
10587 } else if (mono_class_is_nullable (klass)) {
10588 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10590 addr = handle_unbox (cfg, klass, sp, context_used);
10592 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10603 MonoClass *enum_class;
10604 MonoMethod *has_flag;
10610 token = read32 (ip + 1);
10611 klass = mini_get_class (method, token, generic_context);
10612 CHECK_TYPELOAD (klass);
10614 mono_save_token_info (cfg, image, token, klass);
10616 context_used = mini_class_check_context_used (cfg, klass);
10618 if (generic_class_is_reference_type (cfg, klass)) {
10624 if (klass == mono_defaults.void_class)
10626 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10628 /* frequent check in generic code: box (struct), brtrue */
10633 * <push int/long ptr>
10636 * constrained. MyFlags
10637 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10639 * If we find this sequence and the operand types on box and constrained
10640 * are equal, we can emit a specialized instruction sequence instead of
10641 * the very slow HasFlag () call.
10643 if ((cfg->opt & MONO_OPT_INTRINS) &&
10644 /* Cheap checks first. */
10645 ip + 5 + 6 + 5 < end &&
10646 ip [5] == CEE_PREFIX1 &&
10647 ip [6] == CEE_CONSTRAINED_ &&
10648 ip [11] == CEE_CALLVIRT &&
10649 ip_in_bb (cfg, cfg->cbb, ip + 5 + 6 + 5) &&
10650 mono_class_is_enum (klass) &&
10651 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10652 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10653 has_flag->klass == mono_defaults.enum_class &&
10654 !strcmp (has_flag->name, "HasFlag") &&
10655 has_flag->signature->hasthis &&
10656 has_flag->signature->param_count == 1) {
10657 CHECK_TYPELOAD (enum_class);
10659 if (enum_class == klass) {
10660 MonoInst *enum_this, *enum_flag;
10665 enum_this = sp [0];
10666 enum_flag = sp [1];
10668 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10673 // FIXME: LLVM can't handle the inconsistent bb linking
10674 if (!mono_class_is_nullable (klass) &&
10675 ip + 5 < end && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
10676 (ip [5] == CEE_BRTRUE ||
10677 ip [5] == CEE_BRTRUE_S ||
10678 ip [5] == CEE_BRFALSE ||
10679 ip [5] == CEE_BRFALSE_S)) {
10680 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10682 MonoBasicBlock *true_bb, *false_bb;
10686 if (cfg->verbose_level > 3) {
10687 printf ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10688 printf ("<box+brtrue opt>\n");
10693 case CEE_BRFALSE_S:
10696 target = ip + 1 + (signed char)(*ip);
10703 target = ip + 4 + (gint)(read32 (ip));
10707 g_assert_not_reached ();
10711 * We need to link both bblocks, since it is needed for handling stack
10712 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10713 * Branching to only one of them would lead to inconsistencies, so
10714 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10716 GET_BBLOCK (cfg, true_bb, target);
10717 GET_BBLOCK (cfg, false_bb, ip);
10719 mono_link_bblock (cfg, cfg->cbb, true_bb);
10720 mono_link_bblock (cfg, cfg->cbb, false_bb);
10722 if (sp != stack_start) {
10723 handle_stack_args (cfg, stack_start, sp - stack_start);
10725 CHECK_UNVERIFIABLE (cfg);
10728 if (COMPILE_LLVM (cfg)) {
10729 dreg = alloc_ireg (cfg);
10730 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10733 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10735 /* The JIT can't eliminate the iconst+compare */
10736 MONO_INST_NEW (cfg, ins, OP_BR);
10737 ins->inst_target_bb = is_true ? true_bb : false_bb;
10738 MONO_ADD_INS (cfg->cbb, ins);
10741 start_new_bblock = 1;
10745 *sp++ = handle_box (cfg, val, klass, context_used);
10747 CHECK_CFG_EXCEPTION;
10756 token = read32 (ip + 1);
10757 klass = mini_get_class (method, token, generic_context);
10758 CHECK_TYPELOAD (klass);
10760 mono_save_token_info (cfg, image, token, klass);
10762 context_used = mini_class_check_context_used (cfg, klass);
10764 if (mono_class_is_nullable (klass)) {
10767 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10768 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10772 ins = handle_unbox (cfg, klass, sp, context_used);
10785 MonoClassField *field;
10786 #ifndef DISABLE_REMOTING
10790 gboolean is_instance;
10792 gpointer addr = NULL;
10793 gboolean is_special_static;
10795 MonoInst *store_val = NULL;
10796 MonoInst *thread_ins;
10799 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10801 if (op == CEE_STFLD) {
10804 store_val = sp [1];
10809 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10811 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10814 if (op == CEE_STSFLD) {
10817 store_val = sp [0];
10822 token = read32 (ip + 1);
10823 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10824 field = mono_method_get_wrapper_data (method, token);
10825 klass = field->parent;
10828 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10831 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10832 FIELD_ACCESS_FAILURE (method, field);
10833 mono_class_init (klass);
10835 /* if the class is Critical then transparent code cannot access it's fields */
10836 if (!is_instance && mono_security_core_clr_enabled ())
10837 ensure_method_is_allowed_to_access_field (cfg, method, field);
10839 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10840 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10841 if (mono_security_core_clr_enabled ())
10842 ensure_method_is_allowed_to_access_field (cfg, method, field);
10846 * LDFLD etc. is usable on static fields as well, so convert those cases to
10849 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10861 g_assert_not_reached ();
10863 is_instance = FALSE;
10866 context_used = mini_class_check_context_used (cfg, klass);
10868 /* INSTANCE CASE */
10870 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10871 if (op == CEE_STFLD) {
10872 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10874 #ifndef DISABLE_REMOTING
10875 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10876 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10877 MonoInst *iargs [5];
10879 GSHAREDVT_FAILURE (op);
10881 iargs [0] = sp [0];
10882 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10883 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10884 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10886 iargs [4] = sp [1];
10888 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10889 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10890 iargs, ip, cfg->real_offset, TRUE);
10891 CHECK_CFG_EXCEPTION;
10892 g_assert (costs > 0);
10894 cfg->real_offset += 5;
10896 inline_costs += costs;
10898 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10905 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10907 if (mini_is_gsharedvt_klass (klass)) {
10908 MonoInst *offset_ins;
10910 context_used = mini_class_check_context_used (cfg, klass);
10912 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10913 dreg = alloc_ireg_mp (cfg);
10914 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10915 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10916 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10918 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10920 if (sp [0]->opcode != OP_LDADDR)
10921 store->flags |= MONO_INST_FAULT;
10923 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10924 /* insert call to write barrier */
10928 dreg = alloc_ireg_mp (cfg);
10929 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10930 emit_write_barrier (cfg, ptr, sp [1]);
10933 store->flags |= ins_flag;
10940 #ifndef DISABLE_REMOTING
10941 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10942 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10943 MonoInst *iargs [4];
10945 GSHAREDVT_FAILURE (op);
10947 iargs [0] = sp [0];
10948 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10949 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10950 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10951 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10952 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10953 iargs, ip, cfg->real_offset, TRUE);
10954 CHECK_CFG_EXCEPTION;
10955 g_assert (costs > 0);
10957 cfg->real_offset += 5;
10961 inline_costs += costs;
10963 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10969 if (sp [0]->type == STACK_VTYPE) {
10972 /* Have to compute the address of the variable */
10974 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10976 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10978 g_assert (var->klass == klass);
10980 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10984 if (op == CEE_LDFLDA) {
10985 if (sp [0]->type == STACK_OBJ) {
10986 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10987 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10990 dreg = alloc_ireg_mp (cfg);
10992 if (mini_is_gsharedvt_klass (klass)) {
10993 MonoInst *offset_ins;
10995 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10996 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10998 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11000 ins->klass = mono_class_from_mono_type (field->type);
11001 ins->type = STACK_MP;
11006 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11008 if (mini_is_gsharedvt_klass (klass)) {
11009 MonoInst *offset_ins;
11011 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11012 dreg = alloc_ireg_mp (cfg);
11013 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11014 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11016 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11018 load->flags |= ins_flag;
11019 if (sp [0]->opcode != OP_LDADDR)
11020 load->flags |= MONO_INST_FAULT;
11032 context_used = mini_class_check_context_used (cfg, klass);
11034 ftype = mono_field_get_type (field);
11036 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11039 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11040 * to be called here.
11042 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11043 mono_class_vtable (cfg->domain, klass);
11044 CHECK_TYPELOAD (klass);
11046 mono_domain_lock (cfg->domain);
11047 if (cfg->domain->special_static_fields)
11048 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11049 mono_domain_unlock (cfg->domain);
11051 is_special_static = mono_class_field_is_special_static (field);
11053 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11054 thread_ins = mono_get_thread_intrinsic (cfg);
11058 /* Generate IR to compute the field address */
11059 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11061 * Fast access to TLS data
11062 * Inline version of get_thread_static_data () in
11066 int idx, static_data_reg, array_reg, dreg;
11068 GSHAREDVT_FAILURE (op);
11070 MONO_ADD_INS (cfg->cbb, thread_ins);
11071 static_data_reg = alloc_ireg (cfg);
11072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11074 if (cfg->compile_aot) {
11075 int offset_reg, offset2_reg, idx_reg;
11077 /* For TLS variables, this will return the TLS offset */
11078 EMIT_NEW_SFLDACONST (cfg, ins, field);
11079 offset_reg = ins->dreg;
11080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11081 idx_reg = alloc_ireg (cfg);
11082 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, idx_reg, offset_reg, 0x3f);
11083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11084 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11085 array_reg = alloc_ireg (cfg);
11086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11087 offset2_reg = alloc_ireg (cfg);
11088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_UN_IMM, offset2_reg, offset_reg, 6);
11089 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset2_reg, 0x1ffffff);
11090 dreg = alloc_ireg (cfg);
11091 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11093 offset = (gsize)addr & 0x7fffffff;
11094 idx = offset & 0x3f;
11096 array_reg = alloc_ireg (cfg);
11097 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11098 dreg = alloc_ireg (cfg);
11099 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, ((offset >> 6) & 0x1ffffff));
11101 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11102 (cfg->compile_aot && is_special_static) ||
11103 (context_used && is_special_static)) {
11104 MonoInst *iargs [2];
11106 g_assert (field->parent);
11107 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11108 if (context_used) {
11109 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11110 field, MONO_RGCTX_INFO_CLASS_FIELD);
11112 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11114 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11115 } else if (context_used) {
11116 MonoInst *static_data;
11119 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11120 method->klass->name_space, method->klass->name, method->name,
11121 depth, field->offset);
11124 if (mono_class_needs_cctor_run (klass, method))
11125 emit_class_init (cfg, klass);
11128 * The pointer we're computing here is
11130 * super_info.static_data + field->offset
11132 static_data = emit_get_rgctx_klass (cfg, context_used,
11133 klass, MONO_RGCTX_INFO_STATIC_DATA);
11135 if (mini_is_gsharedvt_klass (klass)) {
11136 MonoInst *offset_ins;
11138 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11139 dreg = alloc_ireg_mp (cfg);
11140 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11141 } else if (field->offset == 0) {
11144 int addr_reg = mono_alloc_preg (cfg);
11145 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11147 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11148 MonoInst *iargs [2];
11150 g_assert (field->parent);
11151 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11152 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11153 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11155 MonoVTable *vtable = NULL;
11157 if (!cfg->compile_aot)
11158 vtable = mono_class_vtable (cfg->domain, klass);
11159 CHECK_TYPELOAD (klass);
11162 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11163 if (!(g_slist_find (class_inits, klass))) {
11164 emit_class_init (cfg, klass);
11165 if (cfg->verbose_level > 2)
11166 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11167 class_inits = g_slist_prepend (class_inits, klass);
11170 if (cfg->run_cctors) {
11172 /* This makes so that inline cannot trigger */
11173 /* .cctors: too many apps depend on them */
11174 /* running with a specific order... */
11176 if (! vtable->initialized)
11177 INLINE_FAILURE ("class init");
11178 ex = mono_runtime_class_init_full (vtable, FALSE);
11180 set_exception_object (cfg, ex);
11181 goto exception_exit;
11185 if (cfg->compile_aot)
11186 EMIT_NEW_SFLDACONST (cfg, ins, field);
11189 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11191 EMIT_NEW_PCONST (cfg, ins, addr);
11194 MonoInst *iargs [1];
11195 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11196 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11200 /* Generate IR to do the actual load/store operation */
11202 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11203 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11204 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11207 if (op == CEE_LDSFLDA) {
11208 ins->klass = mono_class_from_mono_type (ftype);
11209 ins->type = STACK_PTR;
11211 } else if (op == CEE_STSFLD) {
11214 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11215 store->flags |= ins_flag;
11217 gboolean is_const = FALSE;
11218 MonoVTable *vtable = NULL;
11219 gpointer addr = NULL;
11221 if (!context_used) {
11222 vtable = mono_class_vtable (cfg->domain, klass);
11223 CHECK_TYPELOAD (klass);
11225 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11226 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11227 int ro_type = ftype->type;
11229 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11230 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11231 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11234 GSHAREDVT_FAILURE (op);
11236 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11239 case MONO_TYPE_BOOLEAN:
11241 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11245 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11248 case MONO_TYPE_CHAR:
11250 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11254 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11259 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11263 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11268 case MONO_TYPE_PTR:
11269 case MONO_TYPE_FNPTR:
11270 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11271 type_to_eval_stack_type ((cfg), field->type, *sp);
11274 case MONO_TYPE_STRING:
11275 case MONO_TYPE_OBJECT:
11276 case MONO_TYPE_CLASS:
11277 case MONO_TYPE_SZARRAY:
11278 case MONO_TYPE_ARRAY:
11279 if (!mono_gc_is_moving ()) {
11280 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11281 type_to_eval_stack_type ((cfg), field->type, *sp);
11289 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11294 case MONO_TYPE_VALUETYPE:
11304 CHECK_STACK_OVF (1);
11306 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11307 load->flags |= ins_flag;
11313 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11314 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11315 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11326 token = read32 (ip + 1);
11327 klass = mini_get_class (method, token, generic_context);
11328 CHECK_TYPELOAD (klass);
11329 if (ins_flag & MONO_INST_VOLATILE) {
11330 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11331 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11333 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11334 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11335 ins->flags |= ins_flag;
11336 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11337 generic_class_is_reference_type (cfg, klass)) {
11338 /* insert call to write barrier */
11339 emit_write_barrier (cfg, sp [0], sp [1]);
11351 const char *data_ptr;
11353 guint32 field_token;
11359 token = read32 (ip + 1);
11361 klass = mini_get_class (method, token, generic_context);
11362 CHECK_TYPELOAD (klass);
11364 context_used = mini_class_check_context_used (cfg, klass);
11366 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11367 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11368 ins->sreg1 = sp [0]->dreg;
11369 ins->type = STACK_I4;
11370 ins->dreg = alloc_ireg (cfg);
11371 MONO_ADD_INS (cfg->cbb, ins);
11372 *sp = mono_decompose_opcode (cfg, ins);
11375 if (context_used) {
11376 MonoInst *args [3];
11377 MonoClass *array_class = mono_array_class_get (klass, 1);
11378 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11380 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11383 args [0] = emit_get_rgctx_klass (cfg, context_used,
11384 array_class, MONO_RGCTX_INFO_VTABLE);
11389 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11391 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11393 if (cfg->opt & MONO_OPT_SHARED) {
11394 /* Decompose now to avoid problems with references to the domainvar */
11395 MonoInst *iargs [3];
11397 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11398 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11399 iargs [2] = sp [0];
11401 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11403 /* Decompose later since it is needed by abcrem */
11404 MonoClass *array_type = mono_array_class_get (klass, 1);
11405 mono_class_vtable (cfg->domain, array_type);
11406 CHECK_TYPELOAD (array_type);
11408 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11409 ins->dreg = alloc_ireg_ref (cfg);
11410 ins->sreg1 = sp [0]->dreg;
11411 ins->inst_newa_class = klass;
11412 ins->type = STACK_OBJ;
11413 ins->klass = array_type;
11414 MONO_ADD_INS (cfg->cbb, ins);
11415 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11416 cfg->cbb->has_array_access = TRUE;
11418 /* Needed so mono_emit_load_get_addr () gets called */
11419 mono_get_got_var (cfg);
11429 * we inline/optimize the initialization sequence if possible.
11430 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11431 * for small sizes open code the memcpy
11432 * ensure the rva field is big enough
11434 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, cfg->cbb, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11435 MonoMethod *memcpy_method = get_memcpy_method ();
11436 MonoInst *iargs [3];
11437 int add_reg = alloc_ireg_mp (cfg);
11439 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11440 if (cfg->compile_aot) {
11441 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11443 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11445 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11446 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11455 if (sp [0]->type != STACK_OBJ)
11458 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11459 ins->dreg = alloc_preg (cfg);
11460 ins->sreg1 = sp [0]->dreg;
11461 ins->type = STACK_I4;
11462 /* This flag will be inherited by the decomposition */
11463 ins->flags |= MONO_INST_FAULT;
11464 MONO_ADD_INS (cfg->cbb, ins);
11465 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11466 cfg->cbb->has_array_access = TRUE;
11474 if (sp [0]->type != STACK_OBJ)
11477 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11479 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11480 CHECK_TYPELOAD (klass);
11481 /* we need to make sure that this array is exactly the type it needs
11482 * to be for correctness. the wrappers are lax with their usage
11483 * so we need to ignore them here
11485 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11486 MonoClass *array_class = mono_array_class_get (klass, 1);
11487 mini_emit_check_array_type (cfg, sp [0], array_class);
11488 CHECK_TYPELOAD (array_class);
11492 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11497 case CEE_LDELEM_I1:
11498 case CEE_LDELEM_U1:
11499 case CEE_LDELEM_I2:
11500 case CEE_LDELEM_U2:
11501 case CEE_LDELEM_I4:
11502 case CEE_LDELEM_U4:
11503 case CEE_LDELEM_I8:
11505 case CEE_LDELEM_R4:
11506 case CEE_LDELEM_R8:
11507 case CEE_LDELEM_REF: {
11513 if (*ip == CEE_LDELEM) {
11515 token = read32 (ip + 1);
11516 klass = mini_get_class (method, token, generic_context);
11517 CHECK_TYPELOAD (klass);
11518 mono_class_init (klass);
11521 klass = array_access_to_klass (*ip);
11523 if (sp [0]->type != STACK_OBJ)
11526 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11528 if (mini_is_gsharedvt_variable_klass (klass)) {
11529 // FIXME-VT: OP_ICONST optimization
11530 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11531 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11532 ins->opcode = OP_LOADV_MEMBASE;
11533 } else if (sp [1]->opcode == OP_ICONST) {
11534 int array_reg = sp [0]->dreg;
11535 int index_reg = sp [1]->dreg;
11536 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11538 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11539 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11541 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11542 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11545 if (*ip == CEE_LDELEM)
11552 case CEE_STELEM_I1:
11553 case CEE_STELEM_I2:
11554 case CEE_STELEM_I4:
11555 case CEE_STELEM_I8:
11556 case CEE_STELEM_R4:
11557 case CEE_STELEM_R8:
11558 case CEE_STELEM_REF:
11563 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11565 if (*ip == CEE_STELEM) {
11567 token = read32 (ip + 1);
11568 klass = mini_get_class (method, token, generic_context);
11569 CHECK_TYPELOAD (klass);
11570 mono_class_init (klass);
11573 klass = array_access_to_klass (*ip);
11575 if (sp [0]->type != STACK_OBJ)
11578 emit_array_store (cfg, klass, sp, TRUE);
11580 if (*ip == CEE_STELEM)
11587 case CEE_CKFINITE: {
11591 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11592 ins->sreg1 = sp [0]->dreg;
11593 ins->dreg = alloc_freg (cfg);
11594 ins->type = STACK_R8;
11595 MONO_ADD_INS (cfg->cbb, ins);
11597 *sp++ = mono_decompose_opcode (cfg, ins);
11602 case CEE_REFANYVAL: {
11603 MonoInst *src_var, *src;
11605 int klass_reg = alloc_preg (cfg);
11606 int dreg = alloc_preg (cfg);
11608 GSHAREDVT_FAILURE (*ip);
11611 MONO_INST_NEW (cfg, ins, *ip);
11614 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11615 CHECK_TYPELOAD (klass);
11617 context_used = mini_class_check_context_used (cfg, klass);
11620 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11622 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11623 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11626 if (context_used) {
11627 MonoInst *klass_ins;
11629 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11630 klass, MONO_RGCTX_INFO_KLASS);
11633 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11634 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11636 mini_emit_class_check (cfg, klass_reg, klass);
11638 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11639 ins->type = STACK_MP;
11640 ins->klass = klass;
11645 case CEE_MKREFANY: {
11646 MonoInst *loc, *addr;
11648 GSHAREDVT_FAILURE (*ip);
11651 MONO_INST_NEW (cfg, ins, *ip);
11654 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11655 CHECK_TYPELOAD (klass);
11657 context_used = mini_class_check_context_used (cfg, klass);
11659 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11660 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11662 if (context_used) {
11663 MonoInst *const_ins;
11664 int type_reg = alloc_preg (cfg);
11666 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11668 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11670 } else if (cfg->compile_aot) {
11671 int const_reg = alloc_preg (cfg);
11672 int type_reg = alloc_preg (cfg);
11674 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11679 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11680 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11684 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11685 ins->type = STACK_VTYPE;
11686 ins->klass = mono_defaults.typed_reference_class;
11691 case CEE_LDTOKEN: {
11693 MonoClass *handle_class;
11695 CHECK_STACK_OVF (1);
11698 n = read32 (ip + 1);
11700 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11701 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11702 handle = mono_method_get_wrapper_data (method, n);
11703 handle_class = mono_method_get_wrapper_data (method, n + 1);
11704 if (handle_class == mono_defaults.typehandle_class)
11705 handle = &((MonoClass*)handle)->byval_arg;
11708 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11713 mono_class_init (handle_class);
11714 if (cfg->gshared) {
11715 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11716 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11717 /* This case handles ldtoken
11718 of an open type, like for
11721 } else if (handle_class == mono_defaults.typehandle_class) {
11722 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11723 } else if (handle_class == mono_defaults.fieldhandle_class)
11724 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11725 else if (handle_class == mono_defaults.methodhandle_class)
11726 context_used = mini_method_check_context_used (cfg, handle);
11728 g_assert_not_reached ();
11731 if ((cfg->opt & MONO_OPT_SHARED) &&
11732 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11733 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11734 MonoInst *addr, *vtvar, *iargs [3];
11735 int method_context_used;
11737 method_context_used = mini_method_check_context_used (cfg, method);
11739 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11741 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11742 EMIT_NEW_ICONST (cfg, iargs [1], n);
11743 if (method_context_used) {
11744 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11745 method, MONO_RGCTX_INFO_METHOD);
11746 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11748 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11749 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11751 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11753 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11755 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11757 if ((ip + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 5) &&
11758 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11759 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11760 (cmethod->klass == mono_defaults.systemtype_class) &&
11761 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11762 MonoClass *tclass = mono_class_from_mono_type (handle);
11764 mono_class_init (tclass);
11765 if (context_used) {
11766 ins = emit_get_rgctx_klass (cfg, context_used,
11767 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11768 } else if (cfg->compile_aot) {
11769 if (method->wrapper_type) {
11770 mono_error_init (&error); //got to do it since there are multiple conditionals below
11771 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11772 /* Special case for static synchronized wrappers */
11773 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11775 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11776 /* FIXME: n is not a normal token */
11778 EMIT_NEW_PCONST (cfg, ins, NULL);
11781 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11784 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11786 ins->type = STACK_OBJ;
11787 ins->klass = cmethod->klass;
11790 MonoInst *addr, *vtvar;
11792 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11794 if (context_used) {
11795 if (handle_class == mono_defaults.typehandle_class) {
11796 ins = emit_get_rgctx_klass (cfg, context_used,
11797 mono_class_from_mono_type (handle),
11798 MONO_RGCTX_INFO_TYPE);
11799 } else if (handle_class == mono_defaults.methodhandle_class) {
11800 ins = emit_get_rgctx_method (cfg, context_used,
11801 handle, MONO_RGCTX_INFO_METHOD);
11802 } else if (handle_class == mono_defaults.fieldhandle_class) {
11803 ins = emit_get_rgctx_field (cfg, context_used,
11804 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11806 g_assert_not_reached ();
11808 } else if (cfg->compile_aot) {
11809 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11811 EMIT_NEW_PCONST (cfg, ins, handle);
11813 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11815 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11825 MONO_INST_NEW (cfg, ins, OP_THROW);
11827 ins->sreg1 = sp [0]->dreg;
11829 cfg->cbb->out_of_line = TRUE;
11830 MONO_ADD_INS (cfg->cbb, ins);
11831 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11832 MONO_ADD_INS (cfg->cbb, ins);
11835 link_bblock (cfg, cfg->cbb, end_bblock);
11836 start_new_bblock = 1;
11838 case CEE_ENDFINALLY:
11839 /* mono_save_seq_point_info () depends on this */
11840 if (sp != stack_start)
11841 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11842 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11843 MONO_ADD_INS (cfg->cbb, ins);
11845 start_new_bblock = 1;
11848 * Control will leave the method so empty the stack, otherwise
11849 * the next basic block will start with a nonempty stack.
11851 while (sp != stack_start) {
11856 case CEE_LEAVE_S: {
11859 if (*ip == CEE_LEAVE) {
11861 target = ip + 5 + (gint32)read32(ip + 1);
11864 target = ip + 2 + (signed char)(ip [1]);
11867 /* empty the stack */
11868 while (sp != stack_start) {
11873 * If this leave statement is in a catch block, check for a
11874 * pending exception, and rethrow it if necessary.
11875 * We avoid doing this in runtime invoke wrappers, since those are called
11876 * by native code which expects the wrapper to catch all exceptions.
11878 for (i = 0; i < header->num_clauses; ++i) {
11879 MonoExceptionClause *clause = &header->clauses [i];
11882 * Use <= in the final comparison to handle clauses with multiple
11883 * leave statements, like in bug #78024.
11884 * The ordering of the exception clauses guarantees that we find the
11885 * innermost clause.
11887 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11889 MonoBasicBlock *dont_throw;
11894 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11897 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11899 NEW_BBLOCK (cfg, dont_throw);
11902 * Currently, we always rethrow the abort exception, despite the
11903 * fact that this is not correct. See thread6.cs for an example.
11904 * But propagating the abort exception is more important than
11905 * getting the semantics right.
11907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11909 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11911 MONO_START_BB (cfg, dont_throw);
11915 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11917 MonoExceptionClause *clause;
11919 for (tmp = handlers; tmp; tmp = tmp->next) {
11920 clause = tmp->data;
11921 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11923 link_bblock (cfg, cfg->cbb, tblock);
11924 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11925 ins->inst_target_bb = tblock;
11926 ins->inst_eh_block = clause;
11927 MONO_ADD_INS (cfg->cbb, ins);
11928 cfg->cbb->has_call_handler = 1;
11929 if (COMPILE_LLVM (cfg)) {
11930 MonoBasicBlock *target_bb;
11933 * Link the finally bblock with the target, since it will
11934 * conceptually branch there.
11935 * FIXME: Have to link the bblock containing the endfinally.
11937 GET_BBLOCK (cfg, target_bb, target);
11938 link_bblock (cfg, tblock, target_bb);
11941 g_list_free (handlers);
11944 MONO_INST_NEW (cfg, ins, OP_BR);
11945 MONO_ADD_INS (cfg->cbb, ins);
11946 GET_BBLOCK (cfg, tblock, target);
11947 link_bblock (cfg, cfg->cbb, tblock);
11948 ins->inst_target_bb = tblock;
11949 start_new_bblock = 1;
11951 if (*ip == CEE_LEAVE)
11960 * Mono specific opcodes
11962 case MONO_CUSTOM_PREFIX: {
11964 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11968 case CEE_MONO_ICALL: {
11970 MonoJitICallInfo *info;
11972 token = read32 (ip + 2);
11973 func = mono_method_get_wrapper_data (method, token);
11974 info = mono_find_jit_icall_by_addr (func);
11976 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11979 CHECK_STACK (info->sig->param_count);
11980 sp -= info->sig->param_count;
11982 ins = mono_emit_jit_icall (cfg, info->func, sp);
11983 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11987 inline_costs += 10 * num_calls++;
11991 case CEE_MONO_LDPTR_CARD_TABLE: {
11993 gpointer card_mask;
11994 CHECK_STACK_OVF (1);
11996 if (cfg->compile_aot)
11997 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
11999 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
12003 inline_costs += 10 * num_calls++;
12006 case CEE_MONO_LDPTR_NURSERY_START: {
12009 CHECK_STACK_OVF (1);
12011 if (cfg->compile_aot)
12012 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12014 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
12018 inline_costs += 10 * num_calls++;
12021 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12022 CHECK_STACK_OVF (1);
12024 if (cfg->compile_aot)
12025 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12027 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
12031 inline_costs += 10 * num_calls++;
12034 case CEE_MONO_LDPTR: {
12037 CHECK_STACK_OVF (1);
12039 token = read32 (ip + 2);
12041 ptr = mono_method_get_wrapper_data (method, token);
12042 EMIT_NEW_PCONST (cfg, ins, ptr);
12045 inline_costs += 10 * num_calls++;
12046 /* Can't embed random pointers into AOT code */
12050 case CEE_MONO_JIT_ICALL_ADDR: {
12051 MonoJitICallInfo *callinfo;
12054 CHECK_STACK_OVF (1);
12056 token = read32 (ip + 2);
12058 ptr = mono_method_get_wrapper_data (method, token);
12059 callinfo = mono_find_jit_icall_by_addr (ptr);
12060 g_assert (callinfo);
12061 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12064 inline_costs += 10 * num_calls++;
12067 case CEE_MONO_ICALL_ADDR: {
12068 MonoMethod *cmethod;
12071 CHECK_STACK_OVF (1);
12073 token = read32 (ip + 2);
12075 cmethod = mono_method_get_wrapper_data (method, token);
12077 if (cfg->compile_aot) {
12078 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12080 ptr = mono_lookup_internal_call (cmethod);
12082 EMIT_NEW_PCONST (cfg, ins, ptr);
12088 case CEE_MONO_VTADDR: {
12089 MonoInst *src_var, *src;
12095 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12096 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12101 case CEE_MONO_NEWOBJ: {
12102 MonoInst *iargs [2];
12104 CHECK_STACK_OVF (1);
12106 token = read32 (ip + 2);
12107 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12108 mono_class_init (klass);
12109 NEW_DOMAINCONST (cfg, iargs [0]);
12110 MONO_ADD_INS (cfg->cbb, iargs [0]);
12111 NEW_CLASSCONST (cfg, iargs [1], klass);
12112 MONO_ADD_INS (cfg->cbb, iargs [1]);
12113 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12115 inline_costs += 10 * num_calls++;
12118 case CEE_MONO_OBJADDR:
12121 MONO_INST_NEW (cfg, ins, OP_MOVE);
12122 ins->dreg = alloc_ireg_mp (cfg);
12123 ins->sreg1 = sp [0]->dreg;
12124 ins->type = STACK_MP;
12125 MONO_ADD_INS (cfg->cbb, ins);
12129 case CEE_MONO_LDNATIVEOBJ:
12131 * Similar to LDOBJ, but instead load the unmanaged
12132 * representation of the vtype to the stack.
12137 token = read32 (ip + 2);
12138 klass = mono_method_get_wrapper_data (method, token);
12139 g_assert (klass->valuetype);
12140 mono_class_init (klass);
12143 MonoInst *src, *dest, *temp;
12146 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12147 temp->backend.is_pinvoke = 1;
12148 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12149 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12151 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12152 dest->type = STACK_VTYPE;
12153 dest->klass = klass;
12159 case CEE_MONO_RETOBJ: {
12161 * Same as RET, but return the native representation of a vtype
12164 g_assert (cfg->ret);
12165 g_assert (mono_method_signature (method)->pinvoke);
12170 token = read32 (ip + 2);
12171 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12173 if (!cfg->vret_addr) {
12174 g_assert (cfg->ret_var_is_local);
12176 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12178 EMIT_NEW_RETLOADA (cfg, ins);
12180 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12182 if (sp != stack_start)
12185 MONO_INST_NEW (cfg, ins, OP_BR);
12186 ins->inst_target_bb = end_bblock;
12187 MONO_ADD_INS (cfg->cbb, ins);
12188 link_bblock (cfg, cfg->cbb, end_bblock);
12189 start_new_bblock = 1;
12193 case CEE_MONO_CISINST:
12194 case CEE_MONO_CCASTCLASS: {
12199 token = read32 (ip + 2);
12200 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12201 if (ip [1] == CEE_MONO_CISINST)
12202 ins = handle_cisinst (cfg, klass, sp [0]);
12204 ins = handle_ccastclass (cfg, klass, sp [0]);
12209 case CEE_MONO_SAVE_LMF:
12210 case CEE_MONO_RESTORE_LMF:
12211 #ifdef MONO_ARCH_HAVE_LMF_OPS
12212 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12213 MONO_ADD_INS (cfg->cbb, ins);
12214 cfg->need_lmf_area = TRUE;
12218 case CEE_MONO_CLASSCONST:
12219 CHECK_STACK_OVF (1);
12221 token = read32 (ip + 2);
12222 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12225 inline_costs += 10 * num_calls++;
12227 case CEE_MONO_NOT_TAKEN:
12228 cfg->cbb->out_of_line = TRUE;
12231 case CEE_MONO_TLS: {
12234 CHECK_STACK_OVF (1);
12236 key = (gint32)read32 (ip + 2);
12237 g_assert (key < TLS_KEY_NUM);
12239 ins = mono_create_tls_get (cfg, key);
12241 if (cfg->compile_aot) {
12243 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12244 ins->dreg = alloc_preg (cfg);
12245 ins->type = STACK_PTR;
12247 g_assert_not_reached ();
12250 ins->type = STACK_PTR;
12251 MONO_ADD_INS (cfg->cbb, ins);
12256 case CEE_MONO_DYN_CALL: {
12257 MonoCallInst *call;
12259 /* It would be easier to call a trampoline, but that would put an
12260 * extra frame on the stack, confusing exception handling. So
12261 * implement it inline using an opcode for now.
12264 if (!cfg->dyn_call_var) {
12265 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12266 /* prevent it from being register allocated */
12267 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12270 /* Has to use a call inst since the local regalloc expects it */
12271 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12272 ins = (MonoInst*)call;
12274 ins->sreg1 = sp [0]->dreg;
12275 ins->sreg2 = sp [1]->dreg;
12276 MONO_ADD_INS (cfg->cbb, ins);
12278 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12281 inline_costs += 10 * num_calls++;
12285 case CEE_MONO_MEMORY_BARRIER: {
12287 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12291 case CEE_MONO_JIT_ATTACH: {
12292 MonoInst *args [16], *domain_ins;
12293 MonoInst *ad_ins, *jit_tls_ins;
12294 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12296 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12298 EMIT_NEW_PCONST (cfg, ins, NULL);
12299 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12301 ad_ins = mono_get_domain_intrinsic (cfg);
12302 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12304 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12305 NEW_BBLOCK (cfg, next_bb);
12306 NEW_BBLOCK (cfg, call_bb);
12308 if (cfg->compile_aot) {
12309 /* AOT code is only used in the root domain */
12310 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12312 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12314 MONO_ADD_INS (cfg->cbb, ad_ins);
12315 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12316 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12318 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12319 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12320 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12322 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12323 MONO_START_BB (cfg, call_bb);
12326 if (cfg->compile_aot) {
12327 /* AOT code is only used in the root domain */
12328 EMIT_NEW_PCONST (cfg, args [0], NULL);
12330 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12332 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12333 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12336 MONO_START_BB (cfg, next_bb);
12340 case CEE_MONO_JIT_DETACH: {
12341 MonoInst *args [16];
12343 /* Restore the original domain */
12344 dreg = alloc_ireg (cfg);
12345 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12346 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12351 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12357 case CEE_PREFIX1: {
12360 case CEE_ARGLIST: {
12361 /* somewhat similar to LDTOKEN */
12362 MonoInst *addr, *vtvar;
12363 CHECK_STACK_OVF (1);
12364 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12366 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12367 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12369 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12370 ins->type = STACK_VTYPE;
12371 ins->klass = mono_defaults.argumenthandle_class;
12381 MonoInst *cmp, *arg1, *arg2;
12389 * The following transforms:
12390 * CEE_CEQ into OP_CEQ
12391 * CEE_CGT into OP_CGT
12392 * CEE_CGT_UN into OP_CGT_UN
12393 * CEE_CLT into OP_CLT
12394 * CEE_CLT_UN into OP_CLT_UN
12396 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12398 MONO_INST_NEW (cfg, ins, cmp->opcode);
12399 cmp->sreg1 = arg1->dreg;
12400 cmp->sreg2 = arg2->dreg;
12401 type_from_op (cfg, cmp, arg1, arg2);
12403 add_widen_op (cfg, cmp, &arg1, &arg2);
12404 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12405 cmp->opcode = OP_LCOMPARE;
12406 else if (arg1->type == STACK_R4)
12407 cmp->opcode = OP_RCOMPARE;
12408 else if (arg1->type == STACK_R8)
12409 cmp->opcode = OP_FCOMPARE;
12411 cmp->opcode = OP_ICOMPARE;
12412 MONO_ADD_INS (cfg->cbb, cmp);
12413 ins->type = STACK_I4;
12414 ins->dreg = alloc_dreg (cfg, ins->type);
12415 type_from_op (cfg, ins, arg1, arg2);
12417 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12419 * The backends expect the fceq opcodes to do the
12422 ins->sreg1 = cmp->sreg1;
12423 ins->sreg2 = cmp->sreg2;
12426 MONO_ADD_INS (cfg->cbb, ins);
12432 MonoInst *argconst;
12433 MonoMethod *cil_method;
12435 CHECK_STACK_OVF (1);
12437 n = read32 (ip + 2);
12438 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12439 if (!cmethod || mono_loader_get_last_error ())
12441 mono_class_init (cmethod->klass);
12443 mono_save_token_info (cfg, image, n, cmethod);
12445 context_used = mini_method_check_context_used (cfg, cmethod);
12447 cil_method = cmethod;
12448 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12449 METHOD_ACCESS_FAILURE (method, cil_method);
12451 if (mono_security_core_clr_enabled ())
12452 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12455 * Optimize the common case of ldftn+delegate creation
12457 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12458 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12459 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12460 MonoInst *target_ins, *handle_ins;
12461 MonoMethod *invoke;
12462 int invoke_context_used;
12464 invoke = mono_get_delegate_invoke (ctor_method->klass);
12465 if (!invoke || !mono_method_signature (invoke))
12468 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12470 target_ins = sp [-1];
12472 if (mono_security_core_clr_enabled ())
12473 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12475 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12476 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12477 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12478 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12479 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12483 /* FIXME: SGEN support */
12484 if (invoke_context_used == 0) {
12486 if (cfg->verbose_level > 3)
12487 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12488 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12491 CHECK_CFG_EXCEPTION;
12501 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12502 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12506 inline_costs += 10 * num_calls++;
12509 case CEE_LDVIRTFTN: {
12510 MonoInst *args [2];
12514 n = read32 (ip + 2);
12515 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12516 if (!cmethod || mono_loader_get_last_error ())
12518 mono_class_init (cmethod->klass);
12520 context_used = mini_method_check_context_used (cfg, cmethod);
12522 if (mono_security_core_clr_enabled ())
12523 ensure_method_is_allowed_to_call_method (cfg, method, cmethod);
12526 * Optimize the common case of ldvirtftn+delegate creation
12528 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, cfg->cbb, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12529 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12530 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12531 MonoInst *target_ins, *handle_ins;
12532 MonoMethod *invoke;
12533 int invoke_context_used;
12534 gboolean is_virtual = cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL;
12536 invoke = mono_get_delegate_invoke (ctor_method->klass);
12537 if (!invoke || !mono_method_signature (invoke))
12540 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12542 target_ins = sp [-1];
12544 if (mono_security_core_clr_enabled ())
12545 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method);
12547 /* FIXME: SGEN support */
12548 if (invoke_context_used == 0) {
12550 if (cfg->verbose_level > 3)
12551 g_print ("converting (in B%d: stack: %d) %s", cfg->cbb->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12552 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, is_virtual))) {
12555 CHECK_CFG_EXCEPTION;
12568 args [1] = emit_get_rgctx_method (cfg, context_used,
12569 cmethod, MONO_RGCTX_INFO_METHOD);
12572 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12574 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12577 inline_costs += 10 * num_calls++;
12581 CHECK_STACK_OVF (1);
12583 n = read16 (ip + 2);
12585 EMIT_NEW_ARGLOAD (cfg, ins, n);
12590 CHECK_STACK_OVF (1);
12592 n = read16 (ip + 2);
12594 NEW_ARGLOADA (cfg, ins, n);
12595 MONO_ADD_INS (cfg->cbb, ins);
12603 n = read16 (ip + 2);
12605 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12607 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12611 CHECK_STACK_OVF (1);
12613 n = read16 (ip + 2);
12615 EMIT_NEW_LOCLOAD (cfg, ins, n);
12620 unsigned char *tmp_ip;
12621 CHECK_STACK_OVF (1);
12623 n = read16 (ip + 2);
12626 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12632 EMIT_NEW_LOCLOADA (cfg, ins, n);
12641 n = read16 (ip + 2);
12643 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12645 emit_stloc_ir (cfg, sp, header, n);
12652 if (sp != stack_start)
12654 if (cfg->method != method)
12656 * Inlining this into a loop in a parent could lead to
12657 * stack overflows which is different behavior than the
12658 * non-inlined case, thus disable inlining in this case.
12660 INLINE_FAILURE("localloc");
12662 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12663 ins->dreg = alloc_preg (cfg);
12664 ins->sreg1 = sp [0]->dreg;
12665 ins->type = STACK_PTR;
12666 MONO_ADD_INS (cfg->cbb, ins);
12668 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12670 ins->flags |= MONO_INST_INIT;
12675 case CEE_ENDFILTER: {
12676 MonoExceptionClause *clause, *nearest;
12681 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12683 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12684 ins->sreg1 = (*sp)->dreg;
12685 MONO_ADD_INS (cfg->cbb, ins);
12686 start_new_bblock = 1;
12690 for (cc = 0; cc < header->num_clauses; ++cc) {
12691 clause = &header->clauses [cc];
12692 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12693 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12694 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12697 g_assert (nearest);
12698 if ((ip - header->code) != nearest->handler_offset)
12703 case CEE_UNALIGNED_:
12704 ins_flag |= MONO_INST_UNALIGNED;
12705 /* FIXME: record alignment? we can assume 1 for now */
12709 case CEE_VOLATILE_:
12710 ins_flag |= MONO_INST_VOLATILE;
12714 ins_flag |= MONO_INST_TAILCALL;
12715 cfg->flags |= MONO_CFG_HAS_TAIL;
12716 /* Can't inline tail calls at this time */
12717 inline_costs += 100000;
12724 token = read32 (ip + 2);
12725 klass = mini_get_class (method, token, generic_context);
12726 CHECK_TYPELOAD (klass);
12727 if (generic_class_is_reference_type (cfg, klass))
12728 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12730 mini_emit_initobj (cfg, *sp, NULL, klass);
12734 case CEE_CONSTRAINED_:
12736 token = read32 (ip + 2);
12737 constrained_class = mini_get_class (method, token, generic_context);
12738 CHECK_TYPELOAD (constrained_class);
12742 case CEE_INITBLK: {
12743 MonoInst *iargs [3];
12747 /* Skip optimized paths for volatile operations. */
12748 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12749 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12750 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12751 /* emit_memset only works when val == 0 */
12752 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12755 iargs [0] = sp [0];
12756 iargs [1] = sp [1];
12757 iargs [2] = sp [2];
12758 if (ip [1] == CEE_CPBLK) {
12760 * FIXME: It's unclear whether we should be emitting both the acquire
12761 * and release barriers for cpblk. It is technically both a load and
12762 * store operation, so it seems like that's the sensible thing to do.
12764 * FIXME: We emit full barriers on both sides of the operation for
12765 * simplicity. We should have a separate atomic memcpy method instead.
12767 MonoMethod *memcpy_method = get_memcpy_method ();
12769 if (ins_flag & MONO_INST_VOLATILE)
12770 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12772 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12773 call->flags |= ins_flag;
12775 if (ins_flag & MONO_INST_VOLATILE)
12776 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12778 MonoMethod *memset_method = get_memset_method ();
12779 if (ins_flag & MONO_INST_VOLATILE) {
12780 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12781 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12783 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12784 call->flags |= ins_flag;
12795 ins_flag |= MONO_INST_NOTYPECHECK;
12797 ins_flag |= MONO_INST_NORANGECHECK;
12798 /* we ignore the no-nullcheck for now since we
12799 * really do it explicitly only when doing callvirt->call
12803 case CEE_RETHROW: {
12805 int handler_offset = -1;
12807 for (i = 0; i < header->num_clauses; ++i) {
12808 MonoExceptionClause *clause = &header->clauses [i];
12809 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12810 handler_offset = clause->handler_offset;
12815 cfg->cbb->flags |= BB_EXCEPTION_UNSAFE;
12817 if (handler_offset == -1)
12820 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12821 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12822 ins->sreg1 = load->dreg;
12823 MONO_ADD_INS (cfg->cbb, ins);
12825 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12826 MONO_ADD_INS (cfg->cbb, ins);
12829 link_bblock (cfg, cfg->cbb, end_bblock);
12830 start_new_bblock = 1;
12838 CHECK_STACK_OVF (1);
12840 token = read32 (ip + 2);
12841 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12842 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12845 val = mono_type_size (type, &ialign);
12847 MonoClass *klass = mini_get_class (method, token, generic_context);
12848 CHECK_TYPELOAD (klass);
12850 val = mono_type_size (&klass->byval_arg, &ialign);
12852 if (mini_is_gsharedvt_klass (klass))
12853 GSHAREDVT_FAILURE (*ip);
12855 EMIT_NEW_ICONST (cfg, ins, val);
12860 case CEE_REFANYTYPE: {
12861 MonoInst *src_var, *src;
12863 GSHAREDVT_FAILURE (*ip);
12869 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12871 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12872 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12873 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12878 case CEE_READONLY_:
12891 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12901 g_warning ("opcode 0x%02x not handled", *ip);
12905 if (start_new_bblock != 1)
12908 cfg->cbb->cil_length = ip - cfg->cbb->cil_code;
12909 if (cfg->cbb->next_bb) {
12910 /* This could already be set because of inlining, #693905 */
12911 MonoBasicBlock *bb = cfg->cbb;
12913 while (bb->next_bb)
12915 bb->next_bb = end_bblock;
12917 cfg->cbb->next_bb = end_bblock;
12920 if (cfg->method == method && cfg->domainvar) {
12922 MonoInst *get_domain;
12924 cfg->cbb = init_localsbb;
12926 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12927 MONO_ADD_INS (cfg->cbb, get_domain);
12929 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12931 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12932 MONO_ADD_INS (cfg->cbb, store);
12935 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12936 if (cfg->compile_aot)
12937 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12938 mono_get_got_var (cfg);
12941 if (cfg->method == method && cfg->got_var)
12942 mono_emit_load_got_addr (cfg);
12944 if (init_localsbb) {
12945 cfg->cbb = init_localsbb;
12947 for (i = 0; i < header->num_locals; ++i) {
12948 emit_init_local (cfg, i, header->locals [i], init_locals);
12952 if (cfg->init_ref_vars && cfg->method == method) {
12953 /* Emit initialization for ref vars */
12954 // FIXME: Avoid duplication initialization for IL locals.
12955 for (i = 0; i < cfg->num_varinfo; ++i) {
12956 MonoInst *ins = cfg->varinfo [i];
12958 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12959 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12963 if (cfg->lmf_var && cfg->method == method) {
12964 cfg->cbb = init_localsbb;
12965 emit_push_lmf (cfg);
12968 cfg->cbb = init_localsbb;
12969 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12972 MonoBasicBlock *bb;
12975 * Make seq points at backward branch targets interruptable.
12977 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12978 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12979 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12982 /* Add a sequence point for method entry/exit events */
12983 if (seq_points && cfg->gen_sdb_seq_points) {
12984 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12985 MONO_ADD_INS (init_localsbb, ins);
12986 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12987 MONO_ADD_INS (cfg->bb_exit, ins);
12991 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12992 * the code they refer to was dead (#11880).
12994 if (sym_seq_points) {
12995 for (i = 0; i < header->code_size; ++i) {
12996 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12999 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13000 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13007 if (cfg->method == method) {
13008 MonoBasicBlock *bb;
13009 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13010 bb->region = mono_find_block_region (cfg, bb->real_offset);
13012 mono_create_spvar_for_region (cfg, bb->region);
13013 if (cfg->verbose_level > 2)
13014 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13018 if (inline_costs < 0) {
13021 /* Method is too large */
13022 mname = mono_method_full_name (method, TRUE);
13023 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13024 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13028 if ((cfg->verbose_level > 2) && (cfg->method == method))
13029 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13034 g_assert (!mono_error_ok (&cfg->error));
13038 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13042 set_exception_type_from_invalid_il (cfg, method, ip);
13046 g_slist_free (class_inits);
13047 mono_basic_block_free (original_bb);
13048 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13049 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13050 if (cfg->exception_type)
13053 return inline_costs;
13057 store_membase_reg_to_store_membase_imm (int opcode)
13060 case OP_STORE_MEMBASE_REG:
13061 return OP_STORE_MEMBASE_IMM;
13062 case OP_STOREI1_MEMBASE_REG:
13063 return OP_STOREI1_MEMBASE_IMM;
13064 case OP_STOREI2_MEMBASE_REG:
13065 return OP_STOREI2_MEMBASE_IMM;
13066 case OP_STOREI4_MEMBASE_REG:
13067 return OP_STOREI4_MEMBASE_IMM;
13068 case OP_STOREI8_MEMBASE_REG:
13069 return OP_STOREI8_MEMBASE_IMM;
13071 g_assert_not_reached ();
13078 mono_op_to_op_imm (int opcode)
13082 return OP_IADD_IMM;
13084 return OP_ISUB_IMM;
13086 return OP_IDIV_IMM;
13088 return OP_IDIV_UN_IMM;
13090 return OP_IREM_IMM;
13092 return OP_IREM_UN_IMM;
13094 return OP_IMUL_IMM;
13096 return OP_IAND_IMM;
13100 return OP_IXOR_IMM;
13102 return OP_ISHL_IMM;
13104 return OP_ISHR_IMM;
13106 return OP_ISHR_UN_IMM;
13109 return OP_LADD_IMM;
13111 return OP_LSUB_IMM;
13113 return OP_LAND_IMM;
13117 return OP_LXOR_IMM;
13119 return OP_LSHL_IMM;
13121 return OP_LSHR_IMM;
13123 return OP_LSHR_UN_IMM;
13124 #if SIZEOF_REGISTER == 8
13126 return OP_LREM_IMM;
13130 return OP_COMPARE_IMM;
13132 return OP_ICOMPARE_IMM;
13134 return OP_LCOMPARE_IMM;
13136 case OP_STORE_MEMBASE_REG:
13137 return OP_STORE_MEMBASE_IMM;
13138 case OP_STOREI1_MEMBASE_REG:
13139 return OP_STOREI1_MEMBASE_IMM;
13140 case OP_STOREI2_MEMBASE_REG:
13141 return OP_STOREI2_MEMBASE_IMM;
13142 case OP_STOREI4_MEMBASE_REG:
13143 return OP_STOREI4_MEMBASE_IMM;
13145 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13147 return OP_X86_PUSH_IMM;
13148 case OP_X86_COMPARE_MEMBASE_REG:
13149 return OP_X86_COMPARE_MEMBASE_IMM;
13151 #if defined(TARGET_AMD64)
13152 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13153 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13155 case OP_VOIDCALL_REG:
13156 return OP_VOIDCALL;
13164 return OP_LOCALLOC_IMM;
13171 ldind_to_load_membase (int opcode)
13175 return OP_LOADI1_MEMBASE;
13177 return OP_LOADU1_MEMBASE;
13179 return OP_LOADI2_MEMBASE;
13181 return OP_LOADU2_MEMBASE;
13183 return OP_LOADI4_MEMBASE;
13185 return OP_LOADU4_MEMBASE;
13187 return OP_LOAD_MEMBASE;
13188 case CEE_LDIND_REF:
13189 return OP_LOAD_MEMBASE;
13191 return OP_LOADI8_MEMBASE;
13193 return OP_LOADR4_MEMBASE;
13195 return OP_LOADR8_MEMBASE;
13197 g_assert_not_reached ();
13204 stind_to_store_membase (int opcode)
13208 return OP_STOREI1_MEMBASE_REG;
13210 return OP_STOREI2_MEMBASE_REG;
13212 return OP_STOREI4_MEMBASE_REG;
13214 case CEE_STIND_REF:
13215 return OP_STORE_MEMBASE_REG;
13217 return OP_STOREI8_MEMBASE_REG;
13219 return OP_STORER4_MEMBASE_REG;
13221 return OP_STORER8_MEMBASE_REG;
13223 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to the absolute-address LOAD*_MEM variant,
 * or return -1 when the target doesn't support it. Currently only
 * implemented for x86/amd64.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is stored by STORE_OPCODE, return the
 * read-modify-write x86/amd64 opcode which operates directly on the
 * destination memory location, or -1 if no such fusion is possible.
 * OP_MOVE folds away entirely (becomes OP_NOP).
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only word-sized stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following byte store into an x86
 * SETcc-to-memory opcode, or return -1 when no fusion applies.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* fallthrough: non-byte stores fall through and return -1 below */
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a LOAD*_MEMBASE feeding the first source of OPCODE into a single
 * x86/amd64 reg-memory opcode, or return -1 when no fusion applies.
 * LOAD_OPCODE is the opcode of the load producing sreg1.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		/* Under ilp32, a native-word load is 32 bit wide */
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a LOAD*_MEMBASE feeding the second source of OPCODE into a single
 * x86/amd64 reg-memory opcode, or return -1 when no fusion applies.
 * LOAD_OPCODE is the opcode of the load producing sreg2.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but return -1 for opcodes which the current
 * architecture only supports through runtime emulation, so the caller keeps
 * the register form and lets the emulation path handle it.
 * NOTE(review): the case labels under each #if were elided in this extraction
 * and are inferred from the guard macros — verify against upstream.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
13537 * mono_handle_global_vregs:
13539 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13543 mono_handle_global_vregs (MonoCompile *cfg)
13545 gint32 *vreg_to_bb;
13546 MonoBasicBlock *bb;
13549 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13551 #ifdef MONO_ARCH_SIMD_INTRINSICS
13552 if (cfg->uses_simd_intrinsics)
13553 mono_simd_simplify_indirection (cfg);
13556 /* Find local vregs used in more than one bb */
13557 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13558 MonoInst *ins = bb->code;
13559 int block_num = bb->block_num;
13561 if (cfg->verbose_level > 2)
13562 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13565 for (; ins; ins = ins->next) {
13566 const char *spec = INS_INFO (ins->opcode);
13567 int regtype = 0, regindex;
13570 if (G_UNLIKELY (cfg->verbose_level > 2))
13571 mono_print_ins (ins);
13573 g_assert (ins->opcode >= MONO_CEE_LAST);
13575 for (regindex = 0; regindex < 4; regindex ++) {
13578 if (regindex == 0) {
13579 regtype = spec [MONO_INST_DEST];
13580 if (regtype == ' ')
13583 } else if (regindex == 1) {
13584 regtype = spec [MONO_INST_SRC1];
13585 if (regtype == ' ')
13588 } else if (regindex == 2) {
13589 regtype = spec [MONO_INST_SRC2];
13590 if (regtype == ' ')
13593 } else if (regindex == 3) {
13594 regtype = spec [MONO_INST_SRC3];
13595 if (regtype == ' ')
13600 #if SIZEOF_REGISTER == 4
13601 /* In the LLVM case, the long opcodes are not decomposed */
13602 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13604 * Since some instructions reference the original long vreg,
13605 * and some reference the two component vregs, it is quite hard
13606 * to determine when it needs to be global. So be conservative.
13608 if (!get_vreg_to_inst (cfg, vreg)) {
13609 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13611 if (cfg->verbose_level > 2)
13612 printf ("LONG VREG R%d made global.\n", vreg);
13616 * Make the component vregs volatile since the optimizations can
13617 * get confused otherwise.
13619 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13620 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13624 g_assert (vreg != -1);
13626 prev_bb = vreg_to_bb [vreg];
13627 if (prev_bb == 0) {
13628 /* 0 is a valid block num */
13629 vreg_to_bb [vreg] = block_num + 1;
13630 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
13631 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13634 if (!get_vreg_to_inst (cfg, vreg)) {
13635 if (G_UNLIKELY (cfg->verbose_level > 2))
13636 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
13640 if (vreg_is_ref (cfg, vreg))
13641 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13643 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13646 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13649 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13652 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13655 g_assert_not_reached ();
13659 /* Flag as having been used in more than one bb */
13660 vreg_to_bb [vreg] = -1;
13666 /* If a variable is used in only one bblock, convert it into a local vreg */
13667 for (i = 0; i < cfg->num_varinfo; i++) {
13668 MonoInst *var = cfg->varinfo [i];
13669 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13671 switch (var->type) {
13677 #if SIZEOF_REGISTER == 8
13680 #if !defined(TARGET_X86)
13681 /* Enabling this screws up the fp stack on x86 */
13684 if (mono_arch_is_soft_float ())
13687 /* Arguments are implicitly global */
13688 /* Putting R4 vars into registers doesn't work currently */
13689 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13690 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13692 * Make that the variable's liveness interval doesn't contain a call, since
13693 * that would cause the lvreg to be spilled, making the whole optimization
13696 /* This is too slow for JIT compilation */
13698 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13700 int def_index, call_index, ins_index;
13701 gboolean spilled = FALSE;
13706 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13707 const char *spec = INS_INFO (ins->opcode);
13709 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13710 def_index = ins_index;
13712 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13713 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13714 if (call_index > def_index) {
13720 if (MONO_IS_CALL (ins))
13721 call_index = ins_index;
13731 if (G_UNLIKELY (cfg->verbose_level > 2))
13732 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13733 var->flags |= MONO_INST_IS_DEAD;
13734 cfg->vreg_to_inst [var->dreg] = NULL;
13741 * Compress the varinfo and vars tables so the liveness computation is faster and
13742 * takes up less space.
13745 for (i = 0; i < cfg->num_varinfo; ++i) {
13746 MonoInst *var = cfg->varinfo [i];
13747 if (pos < i && cfg->locals_start == i)
13748 cfg->locals_start = pos;
13749 if (!(var->flags & MONO_INST_IS_DEAD)) {
13751 cfg->varinfo [pos] = cfg->varinfo [i];
13752 cfg->varinfo [pos]->inst_c0 = pos;
13753 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13754 cfg->vars [pos].idx = pos;
13755 #if SIZEOF_REGISTER == 4
13756 if (cfg->varinfo [pos]->type == STACK_I8) {
13757 /* Modify the two component vars too */
13760 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13761 var1->inst_c0 = pos;
13762 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13763 var1->inst_c0 = pos;
13770 cfg->num_varinfo = pos;
13771 if (cfg->locals_start > cfg->num_varinfo)
13772 cfg->locals_start = cfg->num_varinfo;
13776 * mono_spill_global_vars:
13778 * Generate spill code for variables which are not allocated to registers,
13779 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13780 * code is generated which could be optimized by the local optimization passes.
13783 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13785 MonoBasicBlock *bb;
13787 int orig_next_vreg;
13788 guint32 *vreg_to_lvreg;
13790 guint32 i, lvregs_len;
13791 gboolean dest_has_lvreg = FALSE;
13792 guint32 stacktypes [128];
13793 MonoInst **live_range_start, **live_range_end;
13794 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13795 int *gsharedvt_vreg_to_idx = NULL;
13797 *need_local_opts = FALSE;
13799 memset (spec2, 0, sizeof (spec2));
13801 /* FIXME: Move this function to mini.c */
13802 stacktypes ['i'] = STACK_PTR;
13803 stacktypes ['l'] = STACK_I8;
13804 stacktypes ['f'] = STACK_R8;
13805 #ifdef MONO_ARCH_SIMD_INTRINSICS
13806 stacktypes ['x'] = STACK_VTYPE;
13809 #if SIZEOF_REGISTER == 4
13810 /* Create MonoInsts for longs */
13811 for (i = 0; i < cfg->num_varinfo; i++) {
13812 MonoInst *ins = cfg->varinfo [i];
13814 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13815 switch (ins->type) {
13820 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13823 g_assert (ins->opcode == OP_REGOFFSET);
13825 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13827 tree->opcode = OP_REGOFFSET;
13828 tree->inst_basereg = ins->inst_basereg;
13829 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13831 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13833 tree->opcode = OP_REGOFFSET;
13834 tree->inst_basereg = ins->inst_basereg;
13835 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13845 if (cfg->compute_gc_maps) {
13846 /* registers need liveness info even for !non refs */
13847 for (i = 0; i < cfg->num_varinfo; i++) {
13848 MonoInst *ins = cfg->varinfo [i];
13850 if (ins->opcode == OP_REGVAR)
13851 ins->flags |= MONO_INST_GC_TRACK;
13855 if (cfg->gsharedvt) {
13856 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13858 for (i = 0; i < cfg->num_varinfo; ++i) {
13859 MonoInst *ins = cfg->varinfo [i];
13862 if (mini_is_gsharedvt_variable_type (ins->inst_vtype)) {
13863 if (i >= cfg->locals_start) {
13865 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13866 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13867 ins->opcode = OP_GSHAREDVT_LOCAL;
13868 ins->inst_imm = idx;
13871 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13872 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13878 /* FIXME: widening and truncation */
13881 * As an optimization, when a variable allocated to the stack is first loaded into
13882 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13883 * the variable again.
13885 orig_next_vreg = cfg->next_vreg;
13886 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13887 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13891 * These arrays contain the first and last instructions accessing a given
13893 * Since we emit bblocks in the same order we process them here, and we
13894 * don't split live ranges, these will precisely describe the live range of
13895 * the variable, i.e. the instruction range where a valid value can be found
13896 * in the variables location.
13897 * The live range is computed using the liveness info computed by the liveness pass.
13898 * We can't use vmv->range, since that is an abstract live range, and we need
13899 * one which is instruction precise.
13900 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13902 /* FIXME: Only do this if debugging info is requested */
13903 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13904 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13905 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13906 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13908 /* Add spill loads/stores */
13909 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13912 if (cfg->verbose_level > 2)
13913 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13915 /* Clear vreg_to_lvreg array */
13916 for (i = 0; i < lvregs_len; i++)
13917 vreg_to_lvreg [lvregs [i]] = 0;
13921 MONO_BB_FOR_EACH_INS (bb, ins) {
13922 const char *spec = INS_INFO (ins->opcode);
13923 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13924 gboolean store, no_lvreg;
13925 int sregs [MONO_MAX_SRC_REGS];
13927 if (G_UNLIKELY (cfg->verbose_level > 2))
13928 mono_print_ins (ins);
13930 if (ins->opcode == OP_NOP)
13934 * We handle LDADDR here as well, since it can only be decomposed
13935 * when variable addresses are known.
13937 if (ins->opcode == OP_LDADDR) {
13938 MonoInst *var = ins->inst_p0;
13940 if (var->opcode == OP_VTARG_ADDR) {
13941 /* Happens on SPARC/S390 where vtypes are passed by reference */
13942 MonoInst *vtaddr = var->inst_left;
13943 if (vtaddr->opcode == OP_REGVAR) {
13944 ins->opcode = OP_MOVE;
13945 ins->sreg1 = vtaddr->dreg;
13947 else if (var->inst_left->opcode == OP_REGOFFSET) {
13948 ins->opcode = OP_LOAD_MEMBASE;
13949 ins->inst_basereg = vtaddr->inst_basereg;
13950 ins->inst_offset = vtaddr->inst_offset;
13953 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13954 /* gsharedvt arg passed by ref */
13955 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13957 ins->opcode = OP_LOAD_MEMBASE;
13958 ins->inst_basereg = var->inst_basereg;
13959 ins->inst_offset = var->inst_offset;
13960 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13961 MonoInst *load, *load2, *load3;
13962 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13963 int reg1, reg2, reg3;
13964 MonoInst *info_var = cfg->gsharedvt_info_var;
13965 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13969 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13972 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13974 g_assert (info_var);
13975 g_assert (locals_var);
13977 /* Mark the instruction used to compute the locals var as used */
13978 cfg->gsharedvt_locals_var_ins = NULL;
13980 /* Load the offset */
13981 if (info_var->opcode == OP_REGOFFSET) {
13982 reg1 = alloc_ireg (cfg);
13983 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13984 } else if (info_var->opcode == OP_REGVAR) {
13986 reg1 = info_var->dreg;
13988 g_assert_not_reached ();
13990 reg2 = alloc_ireg (cfg);
13991 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13992 /* Load the locals area address */
13993 reg3 = alloc_ireg (cfg);
13994 if (locals_var->opcode == OP_REGOFFSET) {
13995 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13996 } else if (locals_var->opcode == OP_REGVAR) {
13997 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13999 g_assert_not_reached ();
14001 /* Compute the address */
14002 ins->opcode = OP_PADD;
14006 mono_bblock_insert_before_ins (bb, ins, load3);
14007 mono_bblock_insert_before_ins (bb, load3, load2);
14009 mono_bblock_insert_before_ins (bb, load2, load);
14011 g_assert (var->opcode == OP_REGOFFSET);
14013 ins->opcode = OP_ADD_IMM;
14014 ins->sreg1 = var->inst_basereg;
14015 ins->inst_imm = var->inst_offset;
14018 *need_local_opts = TRUE;
14019 spec = INS_INFO (ins->opcode);
14022 if (ins->opcode < MONO_CEE_LAST) {
14023 mono_print_ins (ins);
14024 g_assert_not_reached ();
14028 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14032 if (MONO_IS_STORE_MEMBASE (ins)) {
14033 tmp_reg = ins->dreg;
14034 ins->dreg = ins->sreg2;
14035 ins->sreg2 = tmp_reg;
14038 spec2 [MONO_INST_DEST] = ' ';
14039 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14040 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14041 spec2 [MONO_INST_SRC3] = ' ';
14043 } else if (MONO_IS_STORE_MEMINDEX (ins))
14044 g_assert_not_reached ();
14049 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14050 printf ("\t %.3s %d", spec, ins->dreg);
14051 num_sregs = mono_inst_get_src_registers (ins, sregs);
14052 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14053 printf (" %d", sregs [srcindex]);
14060 regtype = spec [MONO_INST_DEST];
14061 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14064 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14065 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14066 MonoInst *store_ins;
14068 MonoInst *def_ins = ins;
14069 int dreg = ins->dreg; /* The original vreg */
14071 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14073 if (var->opcode == OP_REGVAR) {
14074 ins->dreg = var->dreg;
14075 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14077 * Instead of emitting a load+store, use a _membase opcode.
14079 g_assert (var->opcode == OP_REGOFFSET);
14080 if (ins->opcode == OP_MOVE) {
14084 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14085 ins->inst_basereg = var->inst_basereg;
14086 ins->inst_offset = var->inst_offset;
14089 spec = INS_INFO (ins->opcode);
14093 g_assert (var->opcode == OP_REGOFFSET);
14095 prev_dreg = ins->dreg;
14097 /* Invalidate any previous lvreg for this vreg */
14098 vreg_to_lvreg [ins->dreg] = 0;
14102 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14104 store_opcode = OP_STOREI8_MEMBASE_REG;
14107 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14109 #if SIZEOF_REGISTER != 8
14110 if (regtype == 'l') {
14111 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14112 mono_bblock_insert_after_ins (bb, ins, store_ins);
14113 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14114 mono_bblock_insert_after_ins (bb, ins, store_ins);
14115 def_ins = store_ins;
14120 g_assert (store_opcode != OP_STOREV_MEMBASE);
14122 /* Try to fuse the store into the instruction itself */
14123 /* FIXME: Add more instructions */
14124 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14125 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14126 ins->inst_imm = ins->inst_c0;
14127 ins->inst_destbasereg = var->inst_basereg;
14128 ins->inst_offset = var->inst_offset;
14129 spec = INS_INFO (ins->opcode);
14130 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14131 ins->opcode = store_opcode;
14132 ins->inst_destbasereg = var->inst_basereg;
14133 ins->inst_offset = var->inst_offset;
14137 tmp_reg = ins->dreg;
14138 ins->dreg = ins->sreg2;
14139 ins->sreg2 = tmp_reg;
14142 spec2 [MONO_INST_DEST] = ' ';
14143 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14144 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14145 spec2 [MONO_INST_SRC3] = ' ';
14147 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14148 // FIXME: The backends expect the base reg to be in inst_basereg
14149 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14151 ins->inst_basereg = var->inst_basereg;
14152 ins->inst_offset = var->inst_offset;
14153 spec = INS_INFO (ins->opcode);
14155 /* printf ("INS: "); mono_print_ins (ins); */
14156 /* Create a store instruction */
14157 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14159 /* Insert it after the instruction */
14160 mono_bblock_insert_after_ins (bb, ins, store_ins);
14162 def_ins = store_ins;
14165 * We can't assign ins->dreg to var->dreg here, since the
14166 * sregs could use it. So set a flag, and do it after
14169 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14170 dest_has_lvreg = TRUE;
14175 if (def_ins && !live_range_start [dreg]) {
14176 live_range_start [dreg] = def_ins;
14177 live_range_start_bb [dreg] = bb;
14180 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14183 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14184 tmp->inst_c1 = dreg;
14185 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14192 num_sregs = mono_inst_get_src_registers (ins, sregs);
14193 for (srcindex = 0; srcindex < 3; ++srcindex) {
14194 regtype = spec [MONO_INST_SRC1 + srcindex];
14195 sreg = sregs [srcindex];
14197 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14198 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14199 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14200 MonoInst *use_ins = ins;
14201 MonoInst *load_ins;
14202 guint32 load_opcode;
14204 if (var->opcode == OP_REGVAR) {
14205 sregs [srcindex] = var->dreg;
14206 //mono_inst_set_src_registers (ins, sregs);
14207 live_range_end [sreg] = use_ins;
14208 live_range_end_bb [sreg] = bb;
14210 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14213 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14214 /* var->dreg is a hreg */
14215 tmp->inst_c1 = sreg;
14216 mono_bblock_insert_after_ins (bb, ins, tmp);
14222 g_assert (var->opcode == OP_REGOFFSET);
14224 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14226 g_assert (load_opcode != OP_LOADV_MEMBASE);
14228 if (vreg_to_lvreg [sreg]) {
14229 g_assert (vreg_to_lvreg [sreg] != -1);
14231 /* The variable is already loaded to an lvreg */
14232 if (G_UNLIKELY (cfg->verbose_level > 2))
14233 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14234 sregs [srcindex] = vreg_to_lvreg [sreg];
14235 //mono_inst_set_src_registers (ins, sregs);
14239 /* Try to fuse the load into the instruction */
14240 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14241 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14242 sregs [0] = var->inst_basereg;
14243 //mono_inst_set_src_registers (ins, sregs);
14244 ins->inst_offset = var->inst_offset;
14245 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14246 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14247 sregs [1] = var->inst_basereg;
14248 //mono_inst_set_src_registers (ins, sregs);
14249 ins->inst_offset = var->inst_offset;
14251 if (MONO_IS_REAL_MOVE (ins)) {
14252 ins->opcode = OP_NOP;
14255 //printf ("%d ", srcindex); mono_print_ins (ins);
14257 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14259 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14260 if (var->dreg == prev_dreg) {
14262 * sreg refers to the value loaded by the load
14263 * emitted below, but we need to use ins->dreg
14264 * since it refers to the store emitted earlier.
14268 g_assert (sreg != -1);
14269 vreg_to_lvreg [var->dreg] = sreg;
14270 g_assert (lvregs_len < 1024);
14271 lvregs [lvregs_len ++] = var->dreg;
14275 sregs [srcindex] = sreg;
14276 //mono_inst_set_src_registers (ins, sregs);
14278 #if SIZEOF_REGISTER != 8
14279 if (regtype == 'l') {
14280 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14281 mono_bblock_insert_before_ins (bb, ins, load_ins);
14282 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14283 mono_bblock_insert_before_ins (bb, ins, load_ins);
14284 use_ins = load_ins;
14289 #if SIZEOF_REGISTER == 4
14290 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14292 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14293 mono_bblock_insert_before_ins (bb, ins, load_ins);
14294 use_ins = load_ins;
14298 if (var->dreg < orig_next_vreg) {
14299 live_range_end [var->dreg] = use_ins;
14300 live_range_end_bb [var->dreg] = bb;
14303 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14306 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14307 tmp->inst_c1 = var->dreg;
14308 mono_bblock_insert_after_ins (bb, ins, tmp);
14312 mono_inst_set_src_registers (ins, sregs);
14314 if (dest_has_lvreg) {
14315 g_assert (ins->dreg != -1);
14316 vreg_to_lvreg [prev_dreg] = ins->dreg;
14317 g_assert (lvregs_len < 1024);
14318 lvregs [lvregs_len ++] = prev_dreg;
14319 dest_has_lvreg = FALSE;
14323 tmp_reg = ins->dreg;
14324 ins->dreg = ins->sreg2;
14325 ins->sreg2 = tmp_reg;
14328 if (MONO_IS_CALL (ins)) {
14329 /* Clear vreg_to_lvreg array */
14330 for (i = 0; i < lvregs_len; i++)
14331 vreg_to_lvreg [lvregs [i]] = 0;
14333 } else if (ins->opcode == OP_NOP) {
14335 MONO_INST_NULLIFY_SREGS (ins);
14338 if (cfg->verbose_level > 2)
14339 mono_print_ins_index (1, ins);
14342 /* Extend the live range based on the liveness info */
14343 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14344 for (i = 0; i < cfg->num_varinfo; i ++) {
14345 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14347 if (vreg_is_volatile (cfg, vi->vreg))
14348 /* The liveness info is incomplete */
14351 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14352 /* Live from at least the first ins of this bb */
14353 live_range_start [vi->vreg] = bb->code;
14354 live_range_start_bb [vi->vreg] = bb;
14357 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14358 /* Live at least until the last ins of this bb */
14359 live_range_end [vi->vreg] = bb->last_ins;
14360 live_range_end_bb [vi->vreg] = bb;
14366 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14368 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14369 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14371 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14372 for (i = 0; i < cfg->num_varinfo; ++i) {
14373 int vreg = MONO_VARINFO (cfg, i)->vreg;
14376 if (live_range_start [vreg]) {
14377 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14379 ins->inst_c1 = vreg;
14380 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14382 if (live_range_end [vreg]) {
14383 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14385 ins->inst_c1 = vreg;
14386 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14387 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14389 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14395 if (cfg->gsharedvt_locals_var_ins) {
14396 /* Nullify if unused */
14397 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14398 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14401 g_free (live_range_start);
14402 g_free (live_range_end);
14403 g_free (live_range_start_bb);
14404 g_free (live_range_end_bb);
14409 * - use 'iadd' instead of 'int_add'
14410 * - handling ovf opcodes: decompose in method_to_ir.
14411 * - unify iregs/fregs
14412 * -> partly done, the missing parts are:
14413 * - a more complete unification would involve unifying the hregs as well, so
14414 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14415 * would no longer map to the machine hregs, so the code generators would need to
14416 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14417 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14418 * fp/non-fp branches speeds it up by about 15%.
14419 * - use sext/zext opcodes instead of shifts
14421 * - get rid of TEMPLOADs if possible and use vregs instead
14422 * - clean up usage of OP_P/OP_ opcodes
14423 * - cleanup usage of DUMMY_USE
14424 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14426 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14427 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14428 * - make sure handle_stack_args () is called before the branch is emitted
14429 * - when the new IR is done, get rid of all unused stuff
14430 * - COMPARE/BEQ as separate instructions or unify them ?
14431 * - keeping them separate allows specialized compare instructions like
14432 * compare_imm, compare_membase
14433 * - most back ends unify fp compare+branch, fp compare+ceq
14434 * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14436 * - handle long shift opts on 32 bit platforms somehow: they require
14437 * 3 sregs (2 for arg1 and 1 for arg2)
14438 * - make byref a 'normal' type.
14439 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14440 * variable if needed.
14441 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14442 * like inline_method.
14443 * - remove inlining restrictions
14444 * - fix LNEG and enable cfold of INEG
14445 * - generalize x86 optimizations like ldelema as a peephole optimization
14446 * - add store_mem_imm for amd64
14447 * - optimize the loading of the interruption flag in the managed->native wrappers
14448 * - avoid special handling of OP_NOP in passes
14449 * - move code inserting instructions into one function/macro.
14450 * - try a coalescing phase after liveness analysis
14451 * - add float -> vreg conversion + local optimizations on !x86
14452 * - figure out how to handle decomposed branches during optimizations, ie.
14453 * compare+branch, op_jump_table+op_br etc.
14454 * - promote RuntimeXHandles to vregs
14455 * - vtype cleanups:
14456 * - add a NEW_VARLOADA_VREG macro
14457 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14458 * accessing vtype fields.
14459 * - get rid of I8CONST on 64 bit platforms
14460 * - dealing with the increase in code size due to branches created during opcode
14462 * - use extended basic blocks
14463 * - all parts of the JIT
14464 * - handle_global_vregs () && local regalloc
14465 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14466 * - sources of increase in code size:
14469 * - isinst and castclass
14470 * - lvregs not allocated to global registers even if used multiple times
14471 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14473 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14474 * - add all micro optimizations from the old JIT
14475 * - put tree optimizations into the deadce pass
14476 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14477 * specific function.
14478 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14479 * fcompare + branchCC.
14480 * - create a helper function for allocating a stack slot, taking into account
14481 * MONO_CFG_HAS_SPILLUP.
14483 * - merge the ia64 switch changes.
14484 * - optimize mono_regstate2_alloc_int/float.
14485 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14486 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14487 * parts of the tree could be separated by other instructions, killing the tree
14488 * arguments, or stores killing loads etc. Also, should we fold loads into other
14489 * instructions if the result of the load is used multiple times ?
14490 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14491 * - LAST MERGE: 108395.
14492 * - when returning vtypes in registers, generate IR and append it to the end of the
14493 * last bb instead of doing it in the epilog.
14494 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14502 - When to decompose opcodes:
14503 - earlier: this makes some optimizations hard to implement, since the low level IR
     no longer contains the necessary information. But it is easier to do.
14505 - later: harder to implement, enables more optimizations.
14506 - Branches inside bblocks:
14507 - created when decomposing complex opcodes.
14508 - branches to another bblock: harmless, but not tracked by the branch
14509 optimizations, so need to branch to a label at the start of the bblock.
14510 - branches to inside the same bblock: very problematic, trips up the local
     reg allocator. Can be fixed by splitting the current bblock, but that is a
14512 complex operation, since some local vregs can become global vregs etc.
14513 - Local/global vregs:
14514 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14515 local register allocator.
14516 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14517 structure, created by mono_create_var (). Assigned to hregs or the stack by
14518 the global register allocator.
14519 - When to do optimizations like alu->alu_imm:
14520 - earlier -> saves work later on since the IR will be smaller/simpler
14521 - later -> can work on more instructions
14522 - Handling of valuetypes:
14523 - When a vtype is pushed on the stack, a new temporary is created, an
14524 instruction computing its address (LDADDR) is emitted and pushed on
14525 the stack. Need to optimize cases when the vtype is used immediately as in
14526 argument passing, stloc etc.
14527 - Instead of the to_end stuff in the old JIT, simply call the function handling
14528 the values on the stack before emitting the last instruction of the bb.
14531 #endif /* DISABLE_JIT */