2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
/*
 * Inliner tuning constants and error-exit helper macros.
 *
 * NOTE(review): this listing is elided — the closing "} \" / "} while (0)"
 * lines of the multi-line macros are not visible here.  Every macro below
 * assumes a local 'cfg' (MonoCompile *) in the expanding function, plus the
 * 'exception_exit' / 'mono_error_exit' labels it jumps to on failure.
 */
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20
/* These have 'cfg' as an implicit argument */
#define INLINE_FAILURE(msg) do { \
if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
inline_failure (cfg, msg); \
goto exception_exit; \
#define CHECK_CFG_EXCEPTION do {\
if (cfg->exception_type != MONO_EXCEPTION_NONE) \
goto exception_exit; \
#define METHOD_ACCESS_FAILURE(method, cmethod) do { \
method_access_failure ((cfg), (method), (cmethod)); \
goto exception_exit; \
#define FIELD_ACCESS_FAILURE(method, field) do { \
field_access_failure ((cfg), (method), (field)); \
goto exception_exit; \
#define GENERIC_SHARING_FAILURE(opcode) do { \
gshared_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
#define GSHAREDVT_FAILURE(opcode) do { \
if (cfg->gsharedvt) { \
gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
goto exception_exit; \
#define OUT_OF_MEMORY_FAILURE do { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
goto exception_exit; \
#define DISABLE_AOT(cfg) do { \
if ((cfg)->verbose_level >= 2) \
printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
(cfg)->disable_aot = TRUE; \
#define LOAD_ERROR do { \
break_on_unverified (); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
goto exception_exit; \
#define TYPE_LOAD_ERROR(klass) do { \
cfg->exception_ptr = klass; \
#define CHECK_CFG_ERROR do {\
if (!mono_error_ok (&cfg->error)) { \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
goto mono_error_exit; \
/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for the ldind/stind -> *_MEMBASE opcode translators defined later in this file. */
static int ldind_to_load_membase (int opcode);
static int stind_to_store_membase (int opcode);
/* Opcode -> immediate-form opcode mappings; defined elsewhere in the JIT. */
int mono_op_to_op_imm (int opcode);
int mono_op_to_op_imm_noemul (int opcode);
MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
/* Inlines CMETHOD at IP; definition appears later in the file (not visible in this elided listing). */
static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
/* helper methods signatures */
/* Lazily-built trampoline signatures; populated by mono_create_helper_signatures (). */
static MonoMethodSignature *helper_sig_class_init_trampoline;
static MonoMethodSignature *helper_sig_domain_get;
static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
/*
 * Instruction metadata tables, generated by expanding mini-ops.h with
 * different MINI_OP/MINI_OP3 definitions.  NOTE(review): the surrounding
 * array declarations and #undef lines are elided in this listing.
 */
* Instruction metadata
/* First expansion: per-opcode dest/src1/src2(/src3) register-class characters. */
#define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
#define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
#if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
/* keep in sync with the enum in mini.h */
#include "mini-ops.h"
/* Second expansion: number of source registers used by each opcode. */
#define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
#define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
* This should contain the index of the last sreg + 1. This is not the same
* as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
const gint8 ins_sreg_counts[] = {
#include "mini-ops.h"
/* Initialize a MonoMethodVar: 0xffff marks the first-use position as "not seen yet". */
#define MONO_INIT_VARINFO(vi,id) do { \
(vi)->range.first_use.pos.bid = 0xffff; \
/*
 * Public wrappers around the cfg-local virtual-register allocators
 * (alloc_ireg & friends).  NOTE(review): return types and braces are
 * elided in this listing.
 */
mono_alloc_ireg (MonoCompile *cfg)
return alloc_ireg (cfg);
mono_alloc_lreg (MonoCompile *cfg)
return alloc_lreg (cfg);
mono_alloc_freg (MonoCompile *cfg)
return alloc_freg (cfg);
mono_alloc_preg (MonoCompile *cfg)
return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
return alloc_dreg (cfg, stack_type);
* mono_alloc_ireg_ref:
* Allocate an IREG, and mark it as holding a GC ref.
mono_alloc_ireg_ref (MonoCompile *cfg)
return alloc_ireg_ref (cfg);
* mono_alloc_ireg_mp:
* Allocate an IREG, and mark it as holding a managed pointer.
mono_alloc_ireg_mp (MonoCompile *cfg)
return alloc_ireg_mp (cfg);
* mono_alloc_ireg_copy:
* Allocate an IREG with the same GC type as VREG.
mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking kind (ref / managed pointer / plain int) of VREG. */
if (vreg_is_ref (cfg, vreg))
return alloc_ireg_ref (cfg);
else if (vreg_is_mp (cfg, vreg))
return alloc_ireg_mp (cfg);
return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the move opcode (OP_MOVE/OP_LMOVE/OP_FMOVE/...) used when
 * copying a value of that type between vregs.  NOTE(review): many case
 * labels and return statements of this switch are elided in this listing.
 */
mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
type = mini_get_underlying_type (cfg, type);
switch (type->type) {
case MONO_TYPE_FNPTR:
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
#if SIZEOF_REGISTER == 8
/* r4 values live in float regs unless the backend keeps them as r4 (cfg->r4fp). */
return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
/* Enums move like their underlying integral type. */
type = mono_class_enum_basetype (type->data.klass);
if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_GENERICINST:
type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only reach here under generic sharing. */
g_assert (cfg->generic_sharing_context);
if (mini_type_var_is_vt (cfg, type))
return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug dump of BB: its in/out edges followed by every instruction in the
 * block.  NOTE(review): return type, braces and local declarations are
 * elided in this listing.
 */
mono_print_bb (MonoBasicBlock *bb, const char *msg)
printf ("\n%s %d: [IN: ", msg, bb->block_num);
for (i = 0; i < bb->in_count; ++i)
printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
for (i = 0; i < bb->out_count; ++i)
printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
for (tree = bb->code; tree; tree = tree->next)
mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   One-time construction of the icall signatures used by the trampoline
 * helpers declared above.  The strings are "<ret> <arg>..." descriptors
 * parsed by mono_create_icall_signature ().
 */
mono_create_helper_signatures (void)
helper_sig_domain_get = mono_create_icall_signature ("ptr");
helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/*
 * Out-of-line failure helpers.  MONO_NEVER_INLINE keeps these cold paths out
 * of the hot IR-building code.  NOTE(review): braces (and the body of
 * break_on_unverified's if) are elided in this listing.
 */
static MONO_NEVER_INLINE void
break_on_unverified (void)
/* Trap into the debugger when the --break-on-unverified option is set. */
if (mini_get_debug_options ()->break_on_unverified)
/* Record a MethodAccessException on CFG, naming both methods in the message. */
static MONO_NEVER_INLINE void
method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
char *method_fname = mono_method_full_name (method, TRUE);
char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
g_free (method_fname);
g_free (cil_method_fname);
/* Record a FieldAccessException on CFG, naming the field and the method. */
static MONO_NEVER_INLINE void
field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
char *method_fname = mono_method_full_name (method, TRUE);
char *field_fname = mono_field_full_name (field);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
g_free (method_fname);
g_free (field_fname);
/* Mark the compile as a failed inline attempt (used via INLINE_FAILURE). */
static MONO_NEVER_INLINE void
inline_failure (MonoCompile *cfg, const char *msg)
if (cfg->verbose_level >= 2)
printf ("inline failed: %s\n", msg);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
406 static MONO_NEVER_INLINE void
407 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
409 if (cfg->verbose_level > 2) \
410 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
411 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *
 *   Record that gsharedvt compilation failed at OPCODE; FILE/LINE are the
 * call site captured by the GSHAREDVT_FAILURE macro.  Stores a diagnostic
 * message on CFG and marks it MONO_EXCEPTION_GENERIC_SHARING_FAILED.
 * NOTE(review): braces are elided in this listing.
 */
static MONO_NEVER_INLINE void
gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
if (cfg->verbose_level >= 2)
printf ("%s\n", cfg->exception_message);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * Verification-failure and basic-block helper macros.  NOTE(review): the
 * closing "} while (0)" lines are elided in this listing.  UNVERIFIED falls
 * back to an unshared instantiation for gsharedvt methods instead of
 * treating the IL as invalid; GET_BBLOCK looks up (or creates and registers)
 * the basic block starting at IL offset IP.
 */
* When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
* foo<T> (int i) { ldarg.0; box T; }
#define UNVERIFIED do { \
if (cfg->gsharedvt) { \
if (cfg->verbose_level > 2) \
printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
goto exception_exit; \
break_on_unverified (); \
#define GET_BBLOCK(cfg,tblock,ip) do { \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
NEW_BBLOCK (cfg, (tblock)); \
(tblock)->cil_code = (ip); \
ADD_BBLOCK (cfg, (tblock)); \
#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
(dest)->dreg = alloc_ireg_mp ((cfg)); \
(dest)->sreg1 = (sr1); \
(dest)->sreg2 = (sr2); \
(dest)->inst_imm = (imm); \
(dest)->backend.shift_amount = (shift); \
MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* Emit conversions so both operands of a binary opcode are of the same type */
/*
 * add_widen_op:
 *
 *   Mutates INS's source vregs (via *arg1_ref/*arg2_ref) so both operands
 * agree: r4 operands are widened to r8, and on 64-bit targets an I4 mixed
 * with a native pointer is sign-extended.  NOTE(review): the function
 * header, braces and several condition lines are elided in this listing.
 */
add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
MonoInst *arg1 = *arg1_ref;
MonoInst *arg2 = *arg2_ref;
((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
(arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
/* Mixing r4/r8 is allowed by the spec */
if (arg1->type == STACK_R4) {
int dreg = alloc_freg (cfg);
/* Widen the r4 operand to r8 so the binop sees matching types. */
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
conv->type = STACK_R8;
if (arg2->type == STACK_R4) {
int dreg = alloc_freg (cfg);
EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
conv->type = STACK_R8;
#if SIZEOF_REGISTER == 8
/* FIXME: Need to add many more cases */
if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
int dr = alloc_preg (cfg);
/* Sign-extend the 32-bit operand to pointer width. */
EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
(ins)->sreg2 = widen->dreg;
/*
 * IR-emission macros used by the main CIL decode loop.  They rely on the
 * loop's locals (cfg, ins, sp, bblock, tblock, ip, target, stack_start,
 * start_new_bblock).  ADD_BINOP/ADD_UNOP pop operand(s) from the stack,
 * type-check via type_from_op (), allocate a dreg and push the result;
 * ADD_BINCOND emits a compare + conditional branch and wires up the
 * true/false CFG edges.  NOTE(review): the closing "} while (0)" lines and
 * some interior lines are elided in this listing.
 */
#define ADD_BINOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
ins->sreg1 = sp [0]->dreg; \
ins->sreg2 = sp [1]->dreg; \
type_from_op (cfg, ins, sp [0], sp [1]); \
/* Have to insert a widening op */ \
add_widen_op (cfg, ins, &sp [0], &sp [1]); \
ins->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
#define ADD_UNOP(op) do { \
MONO_INST_NEW (cfg, ins, (op)); \
ins->sreg1 = sp [0]->dreg; \
type_from_op (cfg, ins, sp [0], NULL); \
(ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
MONO_ADD_INS ((cfg)->cbb, (ins)); \
*sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
#define ADD_BINCOND(next_block) do { \
MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
cmp->sreg1 = sp [0]->dreg; \
cmp->sreg2 = sp [1]->dreg; \
type_from_op (cfg, cmp, sp [0], sp [1]); \
add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
type_from_op (cfg, ins, sp [0], sp [1]); \
ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
GET_BBLOCK (cfg, tblock, target); \
link_bblock (cfg, bblock, tblock); \
ins->inst_true_bb = tblock; \
if ((next_block)) { \
link_bblock (cfg, bblock, (next_block)); \
ins->inst_false_bb = (next_block); \
start_new_bblock = 1; \
GET_BBLOCK (cfg, tblock, ip); \
link_bblock (cfg, bblock, tblock); \
ins->inst_false_bb = tblock; \
start_new_bblock = 2; \
if (sp != stack_start) { \
handle_stack_args (cfg, stack_start, sp - stack_start); \
CHECK_UNVERIFIABLE (cfg); \
MONO_ADD_INS (bblock, cmp); \
MONO_ADD_INS (bblock, ins); \
* link_bblock: Links two basic blocks
* links two basic blocks in the control flow graph, the 'from'
* argument is the starting block and the 'to' argument is the block
* the control flow ends to after 'from'.
/*
 * Adds the edge to both from->out_bb and to->in_bb, skipping duplicates.
 * Edge arrays live in the cfg mempool and are reallocated one slot larger
 * on each insertion.  NOTE(review): function header, braces and the
 * array-store/count-increment lines are elided in this listing.
 */
link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
MonoBasicBlock **newa;
if (from->cil_code) {
printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
printf ("edge from entry to exit\n");
/* Bail out if the out-edge already exists. */
for (i = 0; i < from->out_count; ++i) {
if (to == from->out_bb [i]) {
newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
for (i = 0; i < from->out_count; ++i) {
newa [i] = from->out_bb [i];
/* Same dance for the in-edge on 'to'. */
for (i = 0; i < to->in_count; ++i) {
if (from == to->in_bb [i]) {
newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
for (i = 0; i < to->in_count; ++i) {
newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
link_bblock (cfg, from, to);
* mono_find_block_region:
* We mark each basic block with a region ID. We use that to avoid BB
* optimizations when blocks are in different regions.
* A region token that encodes where this region is, and information
* about the clause owner for this block.
* The region encodes the try/catch/filter clause that owns this block
* as well as the type. -1 is a special value that represents a block
* that is in none of try/catch/filter.
/* Region token layout: ((clause_index + 1) << 8) | region_kind | clause_flags. */
mono_find_block_region (MonoCompile *cfg, int offset)
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
/* A filter's code runs from filter_offset up to its handler_offset. */
if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
(offset < (clause->handler_offset)))
return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
if (MONO_OFFSET_IN_CLAUSE (clause, offset))
return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect (as a GList) the clauses of kind TYPE whose protected range
 * contains IP but not TARGET — i.e. the handlers a branch from IP to
 * TARGET would leave and whose handlers must run.  NOTE(review): return
 * type, braces and the final return are elided in this listing.
 */
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
MonoMethodHeader *header = cfg->header;
MonoExceptionClause *clause;
for (i = 0; i < header->num_clauses; ++i) {
clause = &header->clauses [i];
if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
(!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
if (clause->flags == type)
res = g_list_append (res, clause);
/*
 * Per-region stack-pointer variables and per-offset exception variables,
 * memoized in cfg->spvars / cfg->exvars hash tables.  Both are forced
 * MONO_INST_VOLATILE so the register allocator leaves them on the stack.
 * NOTE(review): return types, braces and early returns are elided here.
 */
mono_create_spvar_for_region (MonoCompile *cfg, int region)
var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Lookup-only variant: NULL when no exvar exists for OFFSET. */
mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
/* prevent it from being register allocated */
var->flags |= MONO_INST_VOLATILE;
g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
* Returns the type used in the eval stack when @type is loaded.
* FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Sets INST->type (STACK_I4/I8/PTR/OBJ/R8/VTYPE/...) and INST->klass from
 * TYPE.  NOTE(review): many case labels, the byref branch condition and
 * break statements are elided in this listing.
 */
type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
type = mini_get_underlying_type (cfg, type);
inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack. */
inst->type = STACK_MP;
switch (type->type) {
inst->type = STACK_INV;
inst->type = STACK_I4;
case MONO_TYPE_FNPTR:
inst->type = STACK_PTR;
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
inst->type = STACK_OBJ;
inst->type = STACK_I8;
inst->type = cfg->r4_stack_type;
inst->type = STACK_R8;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
type = mono_class_enum_basetype (type->data.klass);
inst->type = STACK_VTYPE;
case MONO_TYPE_TYPEDBYREF:
inst->klass = mono_defaults.typed_reference_class;
inst->type = STACK_VTYPE;
case MONO_TYPE_GENERICINST:
type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only valid under generic sharing. */
g_assert (cfg->generic_sharing_context);
if (mini_is_gsharedvt_type (cfg, type)) {
g_assert (cfg->gsharedvt);
inst->type = STACK_VTYPE;
type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
g_error ("unknown type 0x%02x in eval stack type", type->type);
* The following tables are used to quickly validate the IL code in type_from_op ().
/*
 * Row/column order is the MonoStackType enum:
 *   INV, I4, I8, PTR, R8, MP, OBJ, VTYPE (, R4 as a trailing 9th column in
 * the rows that can produce/accept it; the other rows rely on implicit
 * zero == STACK_INV initialization).  NOTE(review): the table declaration
 * lines ('static const char ... ' etc.) are elided in this listing.
 */
bin_num_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
{STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
/* neg_table: result type of CEE_NEG/CEE_NOT per operand stack type. */
STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
/* reduce the size of this table */
bin_int_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Nonzero == comparable; values >1 encode "unverifiable but tolerated" pairs. */
bin_comp_table [STACK_MAX] [STACK_MAX] = {
/* Inv i L p F & O vt r4 */
{0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
{0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
{0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
{0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
{0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
{0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
{0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
/* reduce the size of this table */
shift_table [STACK_MAX] [STACK_MAX] = {
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
{STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
* Tables to map from the non-specific opcode to the matching
* type-specific opcode.
/* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
binops_op_map [STACK_MAX] = {
0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
/* handles from CEE_NEG to CEE_CONV_U8 */
unops_op_map [STACK_MAX] = {
0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
/* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
ovfops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
/* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
ovf2ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
/* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
ovf3ops_op_map [STACK_MAX] = {
0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
/* handles from CEE_BEQ to CEE_BLT_UN */
beqops_op_map [STACK_MAX] = {
0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
/* handles from CEE_CEQ to CEE_CLT_UN */
ceqops_op_map [STACK_MAX] = {
0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
* Sets ins->type (the type on the eval stack) according to the
* type of the opcode and the arguments to it.
* Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
* FIXME: this function sets ins->type unconditionally in some cases, but
* it should set it to invalid for some types (a conv.x on an object)
/*
 * type_from_op:
 *
 *   Central type-checker of the IR builder: given a generic opcode in INS
 * and its stack operand(s) SRC1/SRC2, compute the result stack type via the
 * tables above and specialize INS->opcode to the I/L/P/F/R variant.
 * NOTE(review): many case labels, break statements and the function header
 * are elided in this listing.
 */
type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
switch (ins->opcode) {
/* FIXME: check unverifiable args for STACK_MP */
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
ins->type = bin_int_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
ins->type = shift_table [src1->type] [src2->type];
ins->opcode += binops_op_map [ins->type];
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
/* On 64-bit, pointer-sized operands compare with the long compare. */
if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE;
else if (src1->type == STACK_R4)
ins->opcode = OP_RCOMPARE;
else if (src1->type == STACK_R8)
ins->opcode = OP_FCOMPARE;
ins->opcode = OP_ICOMPARE;
case OP_ICOMPARE_IMM:
/* Immediate compare: both table axes use src1 (src2 is the immediate). */
ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
ins->opcode = OP_LCOMPARE_IMM;
ins->opcode += beqops_op_map [src1->type];
ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
/* Unsigned/unordered variants: only table value 1 (fully comparable) is allowed. */
ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
ins->opcode += ceqops_op_map [src1->type];
ins->type = neg_table [src1->type];
ins->opcode += unops_op_map [ins->type];
/* CEE_NOT: only integral stack types are valid. */
if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
ins->type = src1->type;
ins->type = STACK_INV;
ins->opcode += unops_op_map [ins->type];
ins->type = STACK_I4;
ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> double. */
ins->type = STACK_R8;
switch (src1->type) {
ins->opcode = OP_ICONV_TO_R_UN;
ins->opcode = OP_LCONV_TO_R_UN;
case CEE_CONV_OVF_I1:
case CEE_CONV_OVF_U1:
case CEE_CONV_OVF_I2:
case CEE_CONV_OVF_U2:
case CEE_CONV_OVF_I4:
case CEE_CONV_OVF_U4:
ins->type = STACK_I4;
ins->opcode += ovf3ops_op_map [src1->type];
case CEE_CONV_OVF_I_UN:
case CEE_CONV_OVF_U_UN:
ins->type = STACK_PTR;
ins->opcode += ovf2ops_op_map [src1->type];
case CEE_CONV_OVF_I1_UN:
case CEE_CONV_OVF_I2_UN:
case CEE_CONV_OVF_I4_UN:
case CEE_CONV_OVF_U1_UN:
case CEE_CONV_OVF_U2_UN:
case CEE_CONV_OVF_U4_UN:
ins->type = STACK_I4;
ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int. */
ins->type = STACK_PTR;
switch (src1->type) {
ins->opcode = OP_ICONV_TO_U;
#if SIZEOF_VOID_P == 8
ins->opcode = OP_LCONV_TO_U;
ins->opcode = OP_MOVE;
ins->opcode = OP_LCONV_TO_U;
ins->opcode = OP_FCONV_TO_U;
ins->type = STACK_I8;
ins->opcode += unops_op_map [src1->type];
case CEE_CONV_OVF_I8:
case CEE_CONV_OVF_U8:
ins->type = STACK_I8;
ins->opcode += ovf3ops_op_map [src1->type];
case CEE_CONV_OVF_U8_UN:
case CEE_CONV_OVF_I8_UN:
ins->type = STACK_I8;
ins->opcode += ovf2ops_op_map [src1->type];
ins->type = cfg->r4_stack_type;
ins->opcode += unops_op_map [src1->type];
ins->type = STACK_R8;
ins->opcode += unops_op_map [src1->type];
ins->type = STACK_R8;
ins->type = STACK_I4;
ins->opcode += ovfops_op_map [src1->type];
case CEE_CONV_OVF_I:
case CEE_CONV_OVF_U:
ins->type = STACK_PTR;
ins->opcode += ovfops_op_map [src1->type];
case CEE_ADD_OVF_UN:
case CEE_MUL_OVF_UN:
case CEE_SUB_OVF_UN:
ins->type = bin_num_table [src1->type] [src2->type];
ins->opcode += ovfops_op_map [src1->type];
/* There are no overflow-checked float ops. */
if (ins->type == STACK_R8)
ins->type = STACK_INV;
case OP_LOAD_MEMBASE:
ins->type = STACK_PTR;
case OP_LOADI1_MEMBASE:
case OP_LOADU1_MEMBASE:
case OP_LOADI2_MEMBASE:
case OP_LOADU2_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
ins->type = STACK_PTR;
case OP_LOADI8_MEMBASE:
ins->type = STACK_I8;
case OP_LOADR4_MEMBASE:
ins->type = cfg->r4_stack_type;
case OP_LOADR8_MEMBASE:
ins->type = STACK_R8;
g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object_class when no better klass is known. */
if (ins->type == STACK_MP)
ins->klass = mono_defaults.object_class;
/* ldind_type (declaration elided): eval-stack type produced by each CEE_LDIND_* opcode. */
STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Sanity-check that the eval-stack types in ARGS are compatible with
 * SIG's parameter types.  NOTE(review): return type, braces, several case
 * labels and the return statements are elided in this listing.
 */
check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
switch (args->type) {
for (i = 0; i < sig->param_count; ++i) {
switch (args [i].type) {
if (!sig->params [i]->byref)
if (sig->params [i]->byref)
switch (sig->params [i]->type) {
case MONO_TYPE_CLASS:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
if (sig->params [i]->byref)
if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
/*if (!param_table [args [i].type] [sig->params [i]->type])
* When we need a pointer to the current domain many times in a method, we
* call mono_domain_get() once and we store the result in a local variable.
* This function returns the variable that represents the MonoDomain*.
inline static MonoInst *
mono_get_domainvar (MonoCompile *cfg)
/* Lazily create and cache the domain variable on the cfg. */
if (!cfg->domainvar)
cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
return cfg->domainvar;
* The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful when the target arch needs an explicit GOT var and we are AOT compiling. */
mono_get_got_var (MonoCompile *cfg)
#ifdef MONO_ARCH_NEED_GOT_VAR
if (!cfg->compile_aot)
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
return cfg->got_var;
/* Lazily create the rgctx/vtable variable; only valid under generic sharing. */
mono_get_vtable_var (MonoCompile *cfg)
g_assert (cfg->generic_sharing_context);
if (!cfg->rgctx_var) {
cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* force the var to be stack allocated */
cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an evaluation-stack type (ins->type, one of the STACK_* values) back to
 * the corresponding MonoType*. Aborts via g_error () for unhandled stack types.
 */
1265 type_from_stack_type (MonoInst *ins) {
1266 switch (ins->type) {
1267 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1268 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1269 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1270 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1271 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* this case uses the instruction's klass; presumably STACK_MP — elided in this view */
1273 return &ins->klass->this_arg;
1274 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1275 case STACK_VTYPE: return &ins->klass->byval_arg;
1277 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Inverse of type_from_stack_type (): map a MonoType* to the STACK_* value it
 * occupies on the evaluation stack, after stripping enum/underlying types.
 */
1282 static G_GNUC_UNUSED int
1283 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1285 t = mono_type_get_underlying_type (t);
1297 case MONO_TYPE_FNPTR:
1299 case MONO_TYPE_CLASS:
1300 case MONO_TYPE_STRING:
1301 case MONO_TYPE_OBJECT:
1302 case MONO_TYPE_SZARRAY:
1303 case MONO_TYPE_ARRAY:
/* R4 maps to cfg->r4_stack_type, which depends on whether the backend keeps floats as R4 */
1309 return cfg->r4_stack_type;
1312 case MONO_TYPE_VALUETYPE:
1313 case MONO_TYPE_TYPEDBYREF:
1315 case MONO_TYPE_GENERICINST:
1316 if (mono_type_generic_inst_is_valuetype (t))
1322 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element MonoClass* accessed by a CEE_LDELEM_*/CEE_STELEM_*
 * opcode (the case labels for most opcodes are not visible in this view).
 */
1329 array_access_to_klass (int opcode)
1333 return mono_defaults.byte_class;
1335 return mono_defaults.uint16_class;
1338 return mono_defaults.int_class;
1341 return mono_defaults.sbyte_class;
1344 return mono_defaults.int16_class;
1347 return mono_defaults.int32_class;
1349 return mono_defaults.uint32_class;
1352 return mono_defaults.int64_class;
1355 return mono_defaults.single_class;
1358 return mono_defaults.double_class;
1359 case CEE_LDELEM_REF:
1360 case CEE_STELEM_REF:
1361 return mono_defaults.object_class;
1363 g_assert_not_reached ();
1369 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable used to carry stack slot SLOT across basic block
 * boundaries, reusing a previously created one (cached in cfg->intvars,
 * keyed by stack type and slot) when the types match. Slots beyond the
 * method's declared max_stack (possible due to inlining) always get a
 * fresh variable.
 */
1372 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1377 /* inlining can result in deeper stacks */
1378 if (slot >= cfg->header->max_stack)
1379 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1381 pos = ins->type - 1 + slot * STACK_MAX;
1383 switch (ins->type) {
1390 if ((vnum = cfg->intvars [pos]))
1391 return cfg->varinfo [vnum];
1392 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1393 cfg->intvars [pos] = res->inst_c0;
1396 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, record the image+token pair under KEY in
 * cfg->token_info_hash so the AOT compiler can later resolve KEY back to a
 * metadata token. Skipped when a generic context is active (token alone
 * would be ambiguous) or when the token has no table (wrapper reference).
 */
1402 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1405 * Don't use this if a generic_context is set, since that means AOT can't
1406 * look up the method using just the image+token.
1407 * table == 0 means this is a reference made from a wrapper.
1409 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1410 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1411 jump_info_token->image = image;
1412 jump_info_token->token = token;
1413 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1418 * This function is called to handle items that are left on the evaluation stack
1419 * at basic block boundaries. What happens is that we save the values to local variables
1420 * and we reload them later when first entering the target basic block (with the
1421 * handle_loaded_temps () function).
1422 * A single join point will use the same variables (stored in the array bb->out_stack or
1423 * bb->in_stack, if the basic block is before or after the join point).
1425 * This function needs to be called _before_ emitting the last instruction of
1426 * the bb (i.e. before emitting a branch).
1427 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 * Spill the COUNT items in SP to locals at a basic-block boundary so they can
 * be reloaded when entering the successor blocks (see the comment above).
 * Sets cfg->unverifiable when successor in_stack depths disagree.
 */
1430 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1433 MonoBasicBlock *bb = cfg->cbb;
1434 MonoBasicBlock *outb;
1435 MonoInst *inst, **locals;
1440 if (cfg->verbose_level > 3)
1441 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1442 if (!bb->out_scount) {
1443 bb->out_scount = count;
1444 //printf ("bblock %d has out:", bb->block_num);
/* First, see if a successor already has an in_stack we can adopt as our out_stack */
1446 for (i = 0; i < bb->out_count; ++i) {
1447 outb = bb->out_bb [i];
1448 /* exception handlers are linked, but they should not be considered for stack args */
1449 if (outb->flags & BB_EXCEPTION_HANDLER)
1451 //printf (" %d", outb->block_num);
1452 if (outb->in_stack) {
1454 bb->out_stack = outb->in_stack;
/* No successor had one: allocate a fresh out_stack and fill it with locals */
1460 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1461 for (i = 0; i < count; ++i) {
1463 * try to reuse temps already allocated for this purpose, if they occupy the same
1464 * stack slot and if they are of the same type.
1465 * This won't cause conflicts since if 'local' is used to
1466 * store one of the values in the in_stack of a bblock, then
1467 * the same variable will be used for the same outgoing stack
1469 * This doesn't work when inlining methods, since the bblocks
1470 * in the inlined methods do not inherit their in_stack from
1471 * the bblock they are inlined to. See bug #58863 for an
1474 if (cfg->inlined_method)
1475 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1477 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that don't have an in_stack yet */
1482 for (i = 0; i < bb->out_count; ++i) {
1483 outb = bb->out_bb [i];
1484 /* exception handlers are linked, but they should not be considered for stack args */
1485 if (outb->flags & BB_EXCEPTION_HANDLER)
1487 if (outb->in_scount) {
1488 if (outb->in_scount != bb->out_scount) {
1489 cfg->unverifiable = TRUE;
1492 continue; /* check they are the same locals */
1494 outb->in_scount = count;
1495 outb->in_stack = bb->out_stack;
/* Emit the actual spill stores and replace stack entries with the locals */
1498 locals = bb->out_stack;
1500 for (i = 0; i < count; ++i) {
1501 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1502 inst->cil_code = sp [i]->cil_code;
1503 sp [i] = locals [i];
1504 if (cfg->verbose_level > 3)
1505 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1509 * It is possible that the out bblocks already have in_stack assigned, and
1510 * the in_stacks differ. In this case, we will store to all the different
1517 /* Find a bblock which has a different in_stack */
1519 while (bindex < bb->out_count) {
1520 outb = bb->out_bb [bindex];
1521 /* exception handlers are linked, but they should not be considered for stack args */
1522 if (outb->flags & BB_EXCEPTION_HANDLER) {
1526 if (outb->in_stack != locals) {
1527 for (i = 0; i < count; ++i) {
1528 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1529 inst->cil_code = sp [i]->cil_code;
1530 sp [i] = locals [i];
1531 if (cfg->verbose_level > 3)
1532 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1534 locals = outb->in_stack;
1543 /* Emit code which loads interface_offsets [klass->interface_id]
1544 * The array is stored in memory before vtable.
/*
 * Under AOT the adjusted interface id is loaded via a patch constant and
 * added to vtable_reg; otherwise the (negative) offset is computed at
 * compile time from klass->interface_id.
 */
1547 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1549 if (cfg->compile_aot) {
1550 int ioffset_reg = alloc_preg (cfg);
1551 int iid_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1554 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1555 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Load into INTF_BIT_REG a nonzero value iff the interface bitmap found at
 * BASE_REG+OFFSET has the bit for KLASS's interface id set. Three code paths:
 * compressed bitmaps go through the mono_class_interface_match icall; AOT
 * computes the byte/bit position at runtime from a patched iid constant;
 * JIT bakes the byte offset and bit mask in as immediates.
 */
1563 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1565 int ibitmap_reg = alloc_preg (cfg);
1566 #ifdef COMPRESSED_INTERFACE_BITMAP
1568 MonoInst *res, *ins;
1569 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1570 MONO_ADD_INS (cfg->cbb, ins);
1572 if (cfg->compile_aot)
1573 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1575 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1576 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1577 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1579 int ibitmap_byte_reg = alloc_preg (cfg);
1581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1583 if (cfg->compile_aot) {
1584 int iid_reg = alloc_preg (cfg);
1585 int shifted_iid_reg = alloc_preg (cfg);
1586 int ibitmap_byte_address_reg = alloc_preg (cfg);
1587 int masked_iid_reg = alloc_preg (cfg);
1588 int iid_one_bit_reg = alloc_preg (cfg);
1589 int iid_bit_reg = alloc_preg (cfg);
1590 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit mask = 1 << (iid & 7) */
1591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1592 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1593 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1595 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1596 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1599 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1606 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1607 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the interface bitmap stored on MonoClass. */
1610 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1612 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1616 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1617 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the interface bitmap stored on MonoVTable. */
1620 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1622 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1626 * Emit code which checks whether the interface id of @klass is smaller
1627 * than the value given by max_iid_reg.
/*
 * On failure, either branches to FALSE_TARGET (when given) or throws
 * InvalidCastException.
 */
1630 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1631 MonoBasicBlock *false_target)
1633 if (cfg->compile_aot) {
1634 int iid_reg = alloc_preg (cfg);
1635 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1636 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1643 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1646 /* Same as above, but obtains max_iid from a vtable */
1648 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1649 MonoBasicBlock *false_target)
1651 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1653 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1654 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1657 /* Same as above, but obtains max_iid from a klass */
1659 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1660 MonoBasicBlock *false_target)
1662 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load */
1664 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1665 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subtype check on the MonoClass* in KLASS_REG against
 * KLASS, using the class supertypes table: load supertypes [idepth-1] and
 * compare it against KLASS (from KLASS_INS->dreg, an AOT class constant, or
 * an immediate). Branches to TRUE_TARGET on match; the idepth guard branches
 * to FALSE_TARGET when klass->idepth exceeds the fixed supertable size.
 */
1669 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1671 int idepth_reg = alloc_preg (cfg);
1672 int stypes_reg = alloc_preg (cfg);
1673 int stype = alloc_preg (cfg);
1675 mono_class_setup_supertypes (klass);
1677 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1678 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1685 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1686 } else if (cfg->compile_aot) {
1687 int const_reg = alloc_preg (cfg);
1688 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1689 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1693 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for mini_emit_isninst_cast_inst () without a klass instruction. */
1697 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1699 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against the vtable in VTABLE_REG: first the
 * max-iid range check, then the interface bitmap test. On success branches
 * to TRUE_TARGET (when given); otherwise throws InvalidCastException.
 */
1703 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1705 int intf_reg = alloc_preg (cfg);
1707 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1708 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1709 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1711 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1713 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1717 * Variant of the above that takes a register to the class, not the vtable.
1720 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1722 int intf_bit_reg = alloc_preg (cfg);
1724 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1725 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1726 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1728 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1730 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class-equality check of KLASS_REG against KLASS (taken from
 * KLASS_INST->dreg, an AOT class constant, or an immediate), throwing
 * InvalidCastException on mismatch.
 */
1734 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1737 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1738 } else if (cfg->compile_aot) {
1739 int const_reg = alloc_preg (cfg);
1740 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1741 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1743 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1745 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper for mini_emit_class_check_inst () without a klass instruction. */
1749 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1751 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against KLASS (AOT constant or immediate) and branch to
 * TARGET using BRANCH_OP instead of throwing.
 */
1755 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1757 if (cfg->compile_aot) {
1758 int const_reg = alloc_preg (cfg);
1759 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1760 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1762 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1768 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure. The visible array path checks the rank,
 * then recursively checks the element (cast) class, with special handling
 * when the element class is object, System.Enum or its parent, or an
 * interface; SZARRAY casts additionally verify the object has no bounds.
 * The non-array path walks the supertypes table like the isinst helper.
 */
1771 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1774 int rank_reg = alloc_preg (cfg);
1775 int eclass_reg = alloc_preg (cfg);
1777 g_assert (!klass_inst);
1778 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1780 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1781 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
1783 if (klass->cast_class == mono_defaults.object_class) {
1784 int parent_reg = alloc_preg (cfg);
1785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1786 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1787 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1788 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1789 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1790 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1791 } else if (klass->cast_class == mono_defaults.enum_class) {
1792 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1793 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1794 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1796 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1797 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1800 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1801 /* Check that the object is a vector too */
1802 int bounds_reg = alloc_preg (cfg);
1803 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1804 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1805 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1808 int idepth_reg = alloc_preg (cfg);
1809 int stypes_reg = alloc_preg (cfg);
1810 int stype = alloc_preg (cfg);
1812 mono_class_setup_supertypes (klass);
1814 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1815 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1816 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1817 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1820 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1821 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper for mini_emit_castclass_inst () without a klass instruction. */
1826 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1828 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR which sets SIZE bytes at DESTREG+OFFSET to VAL (asserted to
 * be 0). Small aligned sizes use a single store-immediate; larger regions
 * are filled with the widest aligned stores available, falling back to byte
 * stores for unaligned prefixes/suffixes.
 */
1832 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1836 g_assert (val == 0);
1841 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1844 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1847 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1852 #if SIZEOF_REGISTER == 8
1854 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1860 val_reg = alloc_preg (cfg);
1862 if (SIZEOF_REGISTER == 8)
1863 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1865 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1868 /* This could be optimized further if necessary */
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1877 #if !NO_UNALIGNED_ACCESS
1878 if (SIZEOF_REGISTER == 8) {
1880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1885 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1893 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1898 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1903 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET
 * as load/store pairs, using the widest width permitted by ALIGN (and by
 * NO_UNALIGNED_ACCESS), with byte copies for the unaligned remainder.
 * SIZE is asserted < 10000 to bound code expansion.
 */
1910 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1917 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1918 g_assert (size < 10000);
1921 /* This could be optimized further if necessary */
1923 cur_reg = alloc_preg (cfg);
1924 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1932 #if !NO_UNALIGNED_ACCESS
1933 if (SIZEOF_REGISTER == 8) {
1935 cur_reg = alloc_preg (cfg);
1936 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1937 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1946 cur_reg = alloc_preg (cfg);
1947 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1948 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1954 cur_reg = alloc_preg (cfg);
1955 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1956 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1962 cur_reg = alloc_preg (cfg);
1963 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1964 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR storing SREG1 into the TLS slot identified by TLS_KEY. Under AOT
 * the slot offset comes from a patched TLS offset constant (OP_TLS_SET_REG);
 * otherwise the offset is resolved at compile time (OP_TLS_SET).
 */
1972 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1976 if (cfg->compile_aot) {
1977 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1978 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1980 ins->sreg2 = c->dreg;
1981 MONO_ADD_INS (cfg->cbb, ins);
1983 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1985 ins->inst_offset = mini_get_tls_offset (tls_key);
1986 MONO_ADD_INS (cfg->cbb, ins);
1993 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 * Link cfg->lmf_var into the LMF list. Fast path (LMF stored in TLS): load
 * the current LMF via the intrinsic, save it as previous_lmf and install the
 * new one with emit_tls_set (). Slow path: obtain lmf_addr (via the jit_tls
 * intrinsic, an inlined pthread_getspecific, or the mono_get_lmf_addr icall),
 * cache it in cfg->lmf_addr_var, then splice the new LMF at the list head.
 */
1996 emit_push_lmf (MonoCompile *cfg)
1999 * Emit IR to push the LMF:
2000 * lmf_addr = <lmf_addr from tls>
2001 * lmf->lmf_addr = lmf_addr
2002 * lmf->prev_lmf = *lmf_addr
2005 int lmf_reg, prev_lmf_reg;
2006 MonoInst *ins, *lmf_ins;
2011 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2012 /* Load current lmf */
2013 lmf_ins = mono_get_lmf_intrinsic (cfg);
2015 MONO_ADD_INS (cfg->cbb, lmf_ins);
2016 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2017 lmf_reg = ins->dreg;
2018 /* Save previous_lmf */
2019 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Install the new LMF as the TLS current LMF */
2021 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2024 * Store lmf_addr in a variable, so it can be allocated to a global register.
2026 if (!cfg->lmf_addr_var)
2027 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2030 ins = mono_get_jit_tls_intrinsic (cfg);
2032 int jit_tls_dreg = ins->dreg;
2034 MONO_ADD_INS (cfg->cbb, ins);
2035 lmf_reg = alloc_preg (cfg);
2036 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2038 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2041 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2043 MONO_ADD_INS (cfg->cbb, lmf_ins);
2046 MonoInst *args [16], *jit_tls_ins, *ins;
2048 /* Inline mono_get_lmf_addr () */
2049 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2051 /* Load mono_jit_tls_id */
2052 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2053 /* call pthread_getspecific () */
2054 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2055 /* lmf_addr = &jit_tls->lmf */
2056 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2059 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2063 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2065 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2066 lmf_reg = ins->dreg;
2068 prev_lmf_reg = alloc_preg (cfg);
2069 /* Save previous_lmf */
2070 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2071 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new LMF: *lmf_addr = lmf */
2073 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2080 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 * Unlink cfg->lmf_var: restore previous_lmf either into the TLS LMF slot
 * (fast path) or through the cached lmf_addr variable.
 */
2083 emit_pop_lmf (MonoCompile *cfg)
2085 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2091 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2092 lmf_reg = ins->dreg;
2094 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2095 /* Load previous_lmf */
2096 prev_lmf_reg = alloc_preg (cfg);
2097 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Restore it as the TLS current LMF */
2099 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2102 * Emit IR to pop the LMF:
2103 * *(lmf->lmf_addr) = lmf->prev_lmf
2105 /* This could be called before emit_push_lmf () */
2106 if (!cfg->lmf_addr_var)
2107 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2108 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2110 prev_lmf_reg = alloc_preg (cfg);
2111 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2112 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to FUNC with the current method as the
 * single argument. Skipped for inlined methods and when the ENTER_LEAVE
 * profiling option is not enabled.
 */
2117 emit_instrumentation_call (MonoCompile *cfg, void *func)
2119 MonoInst *iargs [1];
2122 * Avoid instrumenting inlined methods since it can
2123 * distort profiling results.
2125 if (cfg->method != cfg->current_method)
2128 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2129 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2130 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call IR opcode for a call returning TYPE: the VOID/plain/L/R/F/V
 * call family, further specialized by CALLI (indirect, *_REG) and VIRT
 * (virtual, *_MEMBASE). Enums and generic instances are resolved to their
 * underlying types first. Aborts via g_error () on unknown types.
 */
2135 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2138 type = mini_get_underlying_type (cfg, type);
2139 switch (type->type) {
2140 case MONO_TYPE_VOID:
2141 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2148 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2152 case MONO_TYPE_FNPTR:
2153 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2154 case MONO_TYPE_CLASS:
2155 case MONO_TYPE_STRING:
2156 case MONO_TYPE_OBJECT:
2157 case MONO_TYPE_SZARRAY:
2158 case MONO_TYPE_ARRAY:
2159 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2162 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2165 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2167 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2169 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2170 case MONO_TYPE_VALUETYPE:
2171 if (type->data.klass->enumtype) {
2172 type = mono_class_enum_basetype (type->data.klass);
2175 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2176 case MONO_TYPE_TYPEDBYREF:
2177 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2178 case MONO_TYPE_GENERICINST:
2179 type = &type->data.generic_class->container_class->byval_arg;
2182 case MONO_TYPE_MVAR:
2184 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2186 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2192 * target_type_is_incompatible:
2193 * @cfg: MonoCompile context
2195 * Check that the item @arg on the evaluation stack can be stored
2196 * in the target type (can be a local, or field, etc).
2197 * The cfg arg can be used to check if we need verification or just
2200 * Returns: non-0 value if arg can't be stored on a target.
/*
 * See the documentation comment above: returns non-0 when the stack item ARG
 * cannot be stored into TARGET. Compares ARG's evaluation-stack type (and,
 * for value types, its klass) against the underlying form of TARGET.
 */
2203 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2205 MonoType *simple_type;
2208 if (target->byref) {
2209 /* FIXME: check that the pointed to types match */
2210 if (arg->type == STACK_MP)
2211 return arg->klass != mono_class_from_mono_type (target);
2212 if (arg->type == STACK_PTR)
2217 simple_type = mini_get_underlying_type (cfg, target);
2218 switch (simple_type->type) {
2219 case MONO_TYPE_VOID:
2227 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2231 /* STACK_MP is needed when setting pinned locals */
2232 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2237 case MONO_TYPE_FNPTR:
2239 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2240 * in native int. (#688008).
2242 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2245 case MONO_TYPE_CLASS:
2246 case MONO_TYPE_STRING:
2247 case MONO_TYPE_OBJECT:
2248 case MONO_TYPE_SZARRAY:
2249 case MONO_TYPE_ARRAY:
2250 if (arg->type != STACK_OBJ)
2252 /* FIXME: check type compatibility */
2256 if (arg->type != STACK_I8)
2260 if (arg->type != cfg->r4_stack_type)
2264 if (arg->type != STACK_R8)
2267 case MONO_TYPE_VALUETYPE:
2268 if (arg->type != STACK_VTYPE)
2270 klass = mono_class_from_mono_type (simple_type);
2271 if (klass != arg->klass)
2274 case MONO_TYPE_TYPEDBYREF:
2275 if (arg->type != STACK_VTYPE)
2277 klass = mono_class_from_mono_type (simple_type);
2278 if (klass != arg->klass)
2281 case MONO_TYPE_GENERICINST:
2282 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2283 if (arg->type != STACK_VTYPE)
2285 klass = mono_class_from_mono_type (simple_type);
2286 if (klass != arg->klass)
2290 if (arg->type != STACK_OBJ)
2292 /* FIXME: check type compatibility */
2296 case MONO_TYPE_MVAR:
2297 g_assert (cfg->generic_sharing_context);
/* Shared type variables: value-type vars need STACK_VTYPE, reference vars STACK_OBJ */
2298 if (mini_type_var_is_vt (cfg, simple_type)) {
2299 if (arg->type != STACK_VTYPE)
2302 if (arg->type != STACK_OBJ)
2307 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2313 * Prepare arguments for passing to a function call.
2314 * Return a non-zero value if the arguments can't be passed to the given
2316 * The type checks are not yet complete and some conversions may need
2317 * casts on 32 or 64 bit architectures.
2319 * FIXME: implement this using target_type_is_incompatible ()
/*
 * See the documentation comment above: returns non-zero when ARGS cannot be
 * passed to a call with signature SIG. Each argument's evaluation-stack type
 * is checked against the underlying form of the corresponding parameter;
 * byref parameters require STACK_MP or STACK_PTR.
 */
2322 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2324 MonoType *simple_type;
2328 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2332 for (i = 0; i < sig->param_count; ++i) {
2333 if (sig->params [i]->byref) {
2334 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2338 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
2340 switch (simple_type->type) {
2341 case MONO_TYPE_VOID:
2350 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2356 case MONO_TYPE_FNPTR:
2357 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2360 case MONO_TYPE_CLASS:
2361 case MONO_TYPE_STRING:
2362 case MONO_TYPE_OBJECT:
2363 case MONO_TYPE_SZARRAY:
2364 case MONO_TYPE_ARRAY:
2365 if (args [i]->type != STACK_OBJ)
2370 if (args [i]->type != STACK_I8)
2374 if (args [i]->type != cfg->r4_stack_type)
2378 if (args [i]->type != STACK_R8)
2381 case MONO_TYPE_VALUETYPE:
2382 if (simple_type->data.klass->enumtype) {
2383 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2386 if (args [i]->type != STACK_VTYPE)
2389 case MONO_TYPE_TYPEDBYREF:
2390 if (args [i]->type != STACK_VTYPE)
2393 case MONO_TYPE_GENERICINST:
2394 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2397 case MONO_TYPE_MVAR:
2399 if (args [i]->type != STACK_VTYPE)
2403 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * Asserts on anything that is not a known *CALL_MEMBASE opcode.
 */
2411 callvirt_to_call (int opcode)
2414 case OP_CALL_MEMBASE:
2416 case OP_VOIDCALL_MEMBASE:
2418 case OP_FCALL_MEMBASE:
2420 case OP_RCALL_MEMBASE:
2422 case OP_VCALL_MEMBASE:
2424 case OP_LCALL_MEMBASE:
2427 g_assert_not_reached ();
2433 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT argument (either the IMT_ARG instruction's value or a
 * constant for METHOD) into a register and attach it to CALL. Under LLVM it
 * is recorded via call->imt_arg_reg; otherwise it is bound to
 * MONO_ARCH_IMT_REG when the architecture defines one, or handed to
 * mono_arch_emit_imt_argument () as a fallback.
 */
2435 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2439 if (COMPILE_LLVM (cfg)) {
2440 method_reg = alloc_preg (cfg);
2443 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2444 } else if (cfg->compile_aot) {
2445 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2448 MONO_INST_NEW (cfg, ins, OP_PCONST);
2449 ins->inst_p0 = method;
2450 ins->dreg = method_reg;
2451 MONO_ADD_INS (cfg->cbb, ins);
2455 call->imt_arg_reg = method_reg;
2457 #ifdef MONO_ARCH_IMT_REG
2458 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2460 /* Need this to keep the IMT arg alive */
2461 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2466 #ifdef MONO_ARCH_IMT_REG
2467 method_reg = alloc_preg (cfg);
2470 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2471 } else if (cfg->compile_aot) {
2472 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2475 MONO_INST_NEW (cfg, ins, OP_PCONST);
2476 ins->inst_p0 = method;
2477 ins->dreg = method_reg;
2478 MONO_ADD_INS (cfg->cbb, ins);
2481 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2483 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo patch record from the mempool MP and record
 * TARGET in it. (The ip/type field assignments and the return are on
 * lines elided from this listing.)
 */
2487 static MonoJumpInfo *
2488 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2490 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2494 ji->data.target = target;
/*
 * mini_class_check_context_used:
 * Return the generic-context usage flags for KLASS when CFG is being
 * compiled with generic sharing; the non-sharing path is elided here
 * (presumably returns 0 — confirm against the full source).
 */
2500 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2502 if (cfg->generic_sharing_context)
2503 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 * Method counterpart of mini_class_check_context_used (): returns the
 * generic-context usage flags for METHOD under generic sharing; the
 * non-sharing path is elided here (presumably returns 0).
 */
2509 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2511 if (cfg->generic_sharing_context)
2512 return mono_method_check_context_used (method);
2518 * check_method_sharing:
2520 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2523 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
/* OUT_PASS_VTABLE / OUT_PASS_MRGCTX are optional out-parameters: each is
 * written only when non-NULL. A vtable is needed for static/valuetype
 * methods of generic classes; an mrgctx is needed when the method has its
 * own method_inst (and never both — see the assert below). */
2525 gboolean pass_vtable = FALSE;
2526 gboolean pass_mrgctx = FALSE;
2528 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2529 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2530 gboolean sharable = FALSE;
2532 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2535 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2536 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2537 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2539 sharable = sharing_enabled && context_sharable;
2543 * Pass vtable iff target method might
2544 * be shared, which means that sharing
2545 * is enabled for its class and its
2546 * context is sharable (and it's not a
2549 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2553 if (mini_method_get_context (cmethod) &&
2554 mini_method_get_context (cmethod)->method_inst) {
2555 g_assert (!pass_vtable);
2557 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2560 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2561 MonoGenericContext *context = mini_method_get_context (cmethod);
2562 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2564 if (sharing_enabled && context_sharable)
2566 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2571 if (out_pass_vtable)
2572 *out_pass_vtable = pass_vtable;
2573 if (out_pass_mrgctx)
2574 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 * Chooses the call opcode (OP_TAILCALL for tail calls, otherwise via
 * ret_type_to_call_opcode), sets up the return: a vret var / an
 * OP_OUTARG_VTRETADDR temp for value-type returns, or a fresh dreg for
 * scalar returns. Under the soft-float fallback, R4 arguments are
 * converted up front through the mono_fload_r4_arg icall because the
 * conversion cannot run inside the outgoing-args sequence. Finally hands
 * the call to the LLVM or native back end to emit the argument moves,
 * and records stack usage in cfg->param_area.
 */
2577 inline static MonoCallInst *
2578 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2579 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2583 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2588 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2590 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2592 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2595 call->signature = sig;
2596 call->rgctx_reg = rgctx;
2597 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2599 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2602 if (mini_type_is_vtype (cfg, sig_ret)) {
2603 call->vret_var = cfg->vret_addr;
2604 //g_assert_not_reached ();
2606 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2607 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2610 temp->backend.is_pinvoke = sig->pinvoke;
2613 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2614 * address of return value to increase optimization opportunities.
2615 * Before vtype decomposition, the dreg of the call ins itself represents the
2616 * fact the call modifies the return value. After decomposition, the call will
2617 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2618 * will be transformed into an LDADDR.
2620 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2621 loada->dreg = alloc_preg (cfg);
2622 loada->inst_p0 = temp;
2623 /* We reference the call too since call->dreg could change during optimization */
2624 loada->inst_p1 = call;
2625 MONO_ADD_INS (cfg->cbb, loada);
2627 call->inst.dreg = temp->dreg;
2629 call->vret_var = loada;
2630 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2631 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2633 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2634 if (COMPILE_SOFT_FLOAT (cfg)) {
2636 * If the call has a float argument, we would need to do an r8->r4 conversion using
2637 * an icall, but that cannot be done during the call sequence since it would clobber
2638 * the call registers + the stack. So we do it before emitting the call.
2640 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2642 MonoInst *in = call->args [i];
2644 if (i >= sig->hasthis)
2645 t = sig->params [i - sig->hasthis];
2647 t = &mono_defaults.int_class->byval_arg;
2648 t = mono_type_get_underlying_type (t);
2650 if (!t->byref && t->type == MONO_TYPE_R4) {
2651 MonoInst *iargs [1];
2655 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2657 /* The result will be in an int vreg */
2658 call->args [i] = conv;
2664 call->need_unbox_trampoline = unbox_trampoline;
2667 if (COMPILE_LLVM (cfg))
2668 mono_llvm_emit_call (cfg, call);
2670 mono_arch_emit_call (cfg, call);
2672 mono_arch_emit_call (cfg, call);
2675 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2676 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the rgctx argument (already in RGCTX_REG) to CALL. When the
 * architecture defines a dedicated MONO_ARCH_RGCTX_REG, pin the vreg to
 * it and flag the cfg/call accordingly; otherwise just record the vreg
 * in call->rgctx_arg_reg.
 */
2682 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2684 #ifdef MONO_ARCH_RGCTX_REG
2685 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2686 cfg->uses_rgctx_reg = TRUE;
2687 call->rgctx_reg = TRUE;
2689 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG and arguments
 * ARGS; IMT_ARG and RGCTX_ARG are optional. For managed-to-native
 * wrappers compiled with check_pinvoke_callconv, the stack pointer is
 * saved before the call and compared after it, throwing
 * ExecutionEngineException on a mismatch (detects wrong calling
 * convention in the native callee).
 */
2696 inline static MonoInst*
2697 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2702 gboolean check_sp = FALSE;
2704 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2705 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2707 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
2712 rgctx_reg = mono_alloc_preg (cfg);
2713 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2717 if (!cfg->stack_inbalance_var)
2718 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2720 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2721 ins->dreg = cfg->stack_inbalance_var->dreg;
2722 MONO_ADD_INS (cfg->cbb, ins);
2725 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2727 call->inst.sreg1 = addr->dreg;
2730 emit_imt_argument (cfg, call, NULL, imt_arg);
2732 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2737 sp_reg = mono_alloc_preg (cfg);
2739 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2741 MONO_ADD_INS (cfg->cbb, ins);
2743 /* Restore the stack so we don't crash when throwing the exception */
2744 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2745 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2746 MONO_ADD_INS (cfg->cbb, ins);
2748 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2749 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2753 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2755 return (MonoInst*)call;
2759 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2762 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2764 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Central helper for emitting a managed call to METHOD. THIS being
 * non-NULL selects virtual dispatch. Handles, in order: string ctors
 * (rewritten to return string), calls that might go through remoting
 * (routed through remoting-invoke-with-check wrappers, or an rgctx
 * fetch + calli under generic sharing), delegate Invoke (direct call
 * through delegate->invoke_impl with a dummy use keeping the delegate
 * alive — see #667921 note below), devirtualization of non-virtual and
 * final methods (with an explicit null check on 'this'), and true
 * virtual/interface dispatch through the vtable or IMT slot.
 * Returns the call instruction as a MonoInst*.
 */
2767 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2768 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2770 #ifndef DISABLE_REMOTING
2771 gboolean might_be_remote = FALSE;
2773 gboolean virtual = this != NULL;
2774 gboolean enable_for_aot = TRUE;
2778 gboolean need_unbox_trampoline;
2781 sig = mono_method_signature (method);
2784 rgctx_reg = mono_alloc_preg (cfg);
2785 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2788 if (method->string_ctor) {
2789 /* Create the real signature */
2790 /* FIXME: Cache these */
2791 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2792 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2797 context_used = mini_method_check_context_used (cfg, method);
2799 #ifndef DISABLE_REMOTING
2800 might_be_remote = this && sig->hasthis &&
2801 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2802 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2804 if (might_be_remote && context_used) {
2807 g_assert (cfg->generic_sharing_context);
2809 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2811 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2815 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2817 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2819 #ifndef DISABLE_REMOTING
2820 if (might_be_remote)
2821 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2824 call->method = method;
2825 call->inst.flags |= MONO_INST_HAS_METHOD;
2826 call->inst.inst_left = this;
2827 call->tail_call = tail;
2830 int vtable_reg, slot_reg, this_reg;
2833 this_reg = this->dreg;
2835 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2836 MonoInst *dummy_use;
2838 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2840 /* Make a call to delegate->invoke_impl */
2841 call->inst.inst_basereg = this_reg;
2842 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2843 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2845 /* We must emit a dummy use here because the delegate trampoline will
2846 replace the 'this' argument with the delegate target making this activation
2847 no longer a root for the delegate.
2848 This is an issue for delegates that target collectible code such as dynamic
2849 methods of GC'able assemblies.
2851 For a test case look into #667921.
2853 FIXME: a dummy use is not the best way to do it as the local register allocator
2854 will put it on a caller save register and spil it around the call.
2855 Ideally, we would either put it on a callee save register or only do the store part.
2857 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2859 return (MonoInst*)call;
/* Devirtualization: a non-virtual (or final, non-remoting-wrapper) target
 * can be called directly after a null check on 'this'. */
2862 if ((!cfg->compile_aot || enable_for_aot) &&
2863 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2864 (MONO_METHOD_IS_FINAL (method) &&
2865 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2866 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2868 * the method is not virtual, we just need to ensure this is not null
2869 * and then we can call the method directly.
2871 #ifndef DISABLE_REMOTING
2872 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2874 * The check above ensures method is not gshared, this is needed since
2875 * gshared methods can't have wrappers.
2877 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2881 if (!method->string_ctor)
2882 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2884 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2885 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2887 * the method is virtual, but we can statically dispatch since either
2888 * it's class or the method itself are sealed.
2889 * But first we need to ensure it's not a null reference.
2891 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2893 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on null 'this'),
 * then compute the slot — a negative IMT-relative offset for interface
 * methods, or a positive vtable-index offset otherwise. */
2895 vtable_reg = alloc_preg (cfg);
2896 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2897 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2900 guint32 imt_slot = mono_method_get_imt_slot (method);
2901 emit_imt_argument (cfg, call, call->method, imt_arg);
2902 slot_reg = vtable_reg;
2903 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2905 if (slot_reg == -1) {
2906 slot_reg = alloc_preg (cfg);
2907 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2908 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2911 slot_reg = vtable_reg;
2912 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2913 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2915 g_assert (mono_method_signature (method)->generic_param_count);
2916 emit_imt_argument (cfg, call, call->method, imt_arg);
2920 call->inst.sreg1 = slot_reg;
2921 call->inst.inst_offset = offset;
2922 call->virtual = TRUE;
2926 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2929 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2931 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper: emit a non-tail call to METHOD using its own
 * signature, with no imt or rgctx arguments.
 */
2935 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2937 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct (non-virtual, non-tail) call to the native function FUNC
 * with signature SIG. (The assignment of FUNC to the call and part of the
 * parameter list are on lines elided from this listing.)
 */
2941 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2948 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2951 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2953 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Look up the registered JIT icall for FUNC and emit a native call to its
 * wrapper, using the icall's registered signature.
 */
2957 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2959 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2963 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2967 * mono_emit_abs_call:
2969 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2971 inline static MonoInst*
2972 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2973 MonoMethodSignature *sig, MonoInst **args)
2975 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2979 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The patch info is registered in cfg->abs_patches (keyed by itself) so
 * the resolver can recognize the fake address, and the call is marked
 * fptr_is_patch accordingly. */
2982 if (cfg->abs_patches == NULL)
2983 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2984 g_hash_table_insert (cfg->abs_patches, ji, ji);
2985 ins = mono_emit_native_call (cfg, ji, sig, args);
2986 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_jit_icall_by_info:
 * Emit a call to the JIT icall INFO. When the icall cannot raise
 * (info->no_raise) and the configuration permits, it may be called
 * without its wrapper; otherwise the wrapper method is created lazily
 * and either inlined (for non-void icalls, via inline_method) or called
 * directly.
 */
2991 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
2993 gboolean no_wrapper = FALSE;
2996 * Call the jit icall without a wrapper if possible.
2997 * The wrapper is needed for the following reasons:
2998 * - to handle exceptions thrown using mono_raise_exceptions () from the
2999 * icall function. The EH code needs the lmf frame pushed by the
3000 * wrapper to be able to unwind back to managed code.
3001 * - to be able to do stack walks for asynchronously suspended
3002 * threads when debugging.
3004 if (info->no_raise) {
3005 if (cfg->compile_aot) {
3006 // FIXME: This might be loaded into a runtime during debugging
3007 // even if it is not compiled using 'soft-debug'.
3010 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
3011 if ((cfg->compile_llvm && SIZEOF_VOID_P == 8) || cfg->gen_seq_points_debug_data)
3020 if (!info->wrapper_method) {
3021 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3022 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method before other threads can observe it. */
3024 mono_memory_barrier ();
3028 * Inline the wrapper method, which is basically a call to the C icall, and
3029 * an exception check.
3031 costs = inline_method (cfg, info->wrapper_method, NULL,
3032 args, NULL, cfg->real_offset, TRUE, out_cbb);
3033 g_assert (costs > 0);
3034 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3038 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 * Sign/zero-extend a small-integer return value of a call INS when the
 * callee is native (pinvoke) or LLVM-compiled, since such code might
 * leave the upper bits of the register uninitialized. Returns the widened
 * instruction (or, per the elided paths, INS unchanged when no widening
 * is needed).
 */
3043 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3045 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3046 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3050 * Native code might return non register sized integers
3051 * without initializing the upper bits.
3053 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3054 case OP_LOADI1_MEMBASE:
3055 widen_op = OP_ICONV_TO_I1;
3057 case OP_LOADU1_MEMBASE:
3058 widen_op = OP_ICONV_TO_U1;
3060 case OP_LOADI2_MEMBASE:
3061 widen_op = OP_ICONV_TO_I2;
3063 case OP_LOADU2_MEMBASE:
3064 widen_op = OP_ICONV_TO_U2;
3070 if (widen_op != -1) {
3071 int dreg = alloc_preg (cfg);
3074 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
3075 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Return (and lazily cache) the managed String.memcpy(3-arg) helper from
 * corlib; aborts with g_error if the method is missing (old corlib).
 */
3085 get_memcpy_method (void)
3087 static MonoMethod *memcpy_method = NULL;
3088 if (!memcpy_method) {
3089 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3091 g_error ("Old corlib found. Install a new one");
3093 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * For every non-static, reference-typed field of KLASS, set the bit in
 * *WB_BITMAP corresponding to the field's pointer-sized slot at byte
 * OFFSET; recurses into embedded value types that contain references.
 * Field offsets must be pointer-aligned (asserted).
 */
3097 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3099 MonoClassField *field;
3100 gpointer iter = NULL;
3102 while ((field = mono_class_get_fields (klass, &iter))) {
3105 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
3107 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3108 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3109 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3110 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3112 MonoClass *field_class = mono_class_from_mono_type (field->type);
3113 if (field_class->has_references)
3114 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for the store of VALUE through PTR. Prefers,
 * in order: the architecture's inline OP_CARD_TABLE_WBARRIER (JIT,
 * non-AOT, nursery known), inline card-table marking (shift, mask, mark
 * the card byte), and finally a call to the GC's managed write-barrier
 * method. No-op when cfg->gen_write_barriers is off. Ends with a dummy
 * use of VALUE to keep it alive across the barrier.
 */
3120 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3122 int card_table_shift_bits;
3123 gpointer card_table_mask;
3125 MonoInst *dummy_use;
3126 int nursery_shift_bits;
3127 size_t nursery_size;
3128 gboolean has_card_table_wb = FALSE;
3130 if (!cfg->gen_write_barriers)
3133 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3135 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3137 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3138 has_card_table_wb = TRUE;
3141 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3144 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3145 wbarrier->sreg1 = ptr->dreg;
3146 wbarrier->sreg2 = value->dreg;
3147 MONO_ADD_INS (cfg->cbb, wbarrier);
3148 } else if (card_table) {
3149 int offset_reg = alloc_preg (cfg);
3150 int card_reg = alloc_preg (cfg);
3153 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3154 if (card_table_mask)
3155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3157 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3158 * IMM's larger than 32bits.
3160 if (cfg->compile_aot) {
3161 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3163 MONO_INST_NEW (cfg, ins, OP_PCONST);
3164 ins->inst_p0 = card_table;
3165 ins->dreg = card_reg;
3166 MONO_ADD_INS (cfg->cbb, ins);
3169 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3170 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3172 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3173 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3176 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Copy SIZE bytes of the valuetype KLASS from iargs[1] to iargs[0] while
 * honoring GC write barriers. Small copies (<= 5 pointers) are unrolled
 * with a per-slot barrier driven by the field bitmap; larger copies fall
 * back to the mono_gc_wbarrier_value_copy_bitmap icall. Bails out (per
 * the elided return paths) when ALIGN is below pointer size or SIZE
 * exceeds the 32-slot bitmap capacity.
 */
3180 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3182 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3183 unsigned need_wb = 0;
3188 /*types with references can't have alignment smaller than sizeof(void*) */
3189 if (align < SIZEOF_VOID_P)
3192 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3193 if (size > 32 * SIZEOF_VOID_P)
3196 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3198 /* We don't unroll more than 5 stores to avoid code bloat. */
3199 if (size > 5 * SIZEOF_VOID_P) {
3200 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3201 size += (SIZEOF_VOID_P - 1);
3202 size &= ~(SIZEOF_VOID_P - 1);
3204 EMIT_NEW_ICONST (cfg, iargs [2], size);
3205 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3206 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3210 destreg = iargs [0]->dreg;
3211 srcreg = iargs [1]->dreg;
3214 dest_ptr_reg = alloc_preg (cfg);
3215 tmp_reg = alloc_preg (cfg);
3218 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3220 while (size >= SIZEOF_VOID_P) {
3221 MonoInst *load_inst;
3222 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3223 load_inst->dreg = tmp_reg;
3224 load_inst->inst_basereg = srcreg;
3225 load_inst->inst_offset = offset;
3226 MONO_ADD_INS (cfg->cbb, load_inst);
3228 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3231 emit_write_barrier (cfg, iargs [0], load_inst);
3233 offset += SIZEOF_VOID_P;
3234 size -= SIZEOF_VOID_P;
3237 /*tmp += sizeof (void*)*/
3238 if (size >= SIZEOF_VOID_P) {
3239 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3240 MONO_ADD_INS (cfg->cbb, iargs [0]);
3244 /* Those cannot be references since size < sizeof (void*) */
3246 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3247 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3253 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3254 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3260 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3261 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3270 * Emit code to copy a valuetype of type @klass whose address is stored in
3271 * @src->dreg to memory whose address is stored at @dest->dreg.
3274 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
/* NATIVE selects the marshaled (native) size; gsharedvt klasses obtain
 * their size and memcpy helper from the rgctx at runtime. When write
 * barriers are needed, copies go through mono_value_copy (or the
 * gsharedvt variant) unless the intrinsic unrolled copy applies. */
3276 MonoInst *iargs [4];
3279 MonoMethod *memcpy_method;
3280 MonoInst *size_ins = NULL;
3281 MonoInst *memcpy_ins = NULL;
3284 if (cfg->generic_sharing_context)
3285 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3288 * This check breaks with spilled vars... need to handle it during verification anyway.
3289 * g_assert (klass && klass == src->klass && klass == dest->klass);
3292 if (mini_is_gsharedvt_klass (cfg, klass)) {
3294 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3295 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3299 n = mono_class_native_size (klass, &align);
3301 n = mono_class_value_size (klass, &align);
3303 /* if native is true there should be no references in the struct */
3304 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3305 /* Avoid barriers when storing to the stack */
3306 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3307 (dest->opcode == OP_LDADDR))) {
3313 context_used = mini_class_check_context_used (cfg, klass);
3315 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3316 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3318 } else if (context_used) {
3319 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3321 if (cfg->compile_aot) {
3322 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3324 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3325 mono_class_compute_gc_descriptor (klass);
3330 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3332 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3337 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3338 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3339 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3344 iargs [2] = size_ins;
3346 EMIT_NEW_ICONST (cfg, iargs [2], n);
3348 memcpy_method = get_memcpy_method ();
3350 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3352 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return (and lazily cache) the managed String.memset(3-arg) helper from
 * corlib; aborts with g_error if the method is missing (old corlib).
 */
3357 get_memset_method (void)
3359 static MonoMethod *memset_method = NULL;
3360 if (!memset_method) {
3361 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3363 g_error ("Old corlib found. Install a new one");
3365 return memset_method;
/*
 * mini_emit_initobj:
 * Emit code zero-initializing the valuetype KLASS at DEST->dreg.
 * gsharedvt klasses use a runtime-size bzero helper fetched from the
 * gsharedvt info; small known sizes are inlined via mini_emit_memset;
 * everything else calls the managed memset helper.
 */
3369 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3371 MonoInst *iargs [3];
3374 MonoMethod *memset_method;
3375 MonoInst *size_ins = NULL;
3376 MonoInst *bzero_ins = NULL;
3377 static MonoMethod *bzero_method;
3379 /* FIXME: Optimize this for the case when dest is an LDADDR */
3380 mono_class_init (klass);
3381 if (mini_is_gsharedvt_klass (cfg, klass)) {
3382 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3383 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3385 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3386 g_assert (bzero_method);
3388 iargs [1] = size_ins;
3389 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3393 n = mono_class_value_size (klass, &align);
3395 if (n <= sizeof (gpointer) * 8) {
3396 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3399 memset_method = get_memset_method ();
3401 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3402 EMIT_NEW_ICONST (cfg, iargs [2], n);
3403 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR producing the runtime generic context used by shared METHOD:
 * the mrgctx var for methods with a method_inst, the vtable var for
 * static/valuetype methods (loading the class vtable out of the mrgctx
 * when present), or the vtable loaded from 'this' for instance methods.
 * Requires generic sharing (asserted).
 */
3408 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3410 MonoInst *this = NULL;
3412 g_assert (cfg->generic_sharing_context);
3414 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3415 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3416 !method->klass->valuetype)
3417 EMIT_NEW_ARGLOAD (cfg, this, 0);
3419 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3420 MonoInst *mrgctx_loc, *mrgctx_var;
3423 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3425 mrgctx_loc = mono_get_vtable_var (cfg);
3426 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3429 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3430 MonoInst *vtable_loc, *vtable_var;
3434 vtable_loc = mono_get_vtable_var (cfg);
3435 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3437 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3438 MonoInst *mrgctx_var = vtable_var;
3441 vtable_reg = alloc_preg (cfg);
3442 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3443 vtable_var->type = STACK_PTR;
3451 vtable_reg = alloc_preg (cfg);
3452 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate (from MP) an rgctx-entry descriptor wrapping a MonoJumpInfo of
 * PATCH_TYPE/PATCH_DATA, to be resolved for METHOD with INFO_TYPE;
 * IN_MRGCTX selects the method rgctx over the class vtable.
 */
3457 static MonoJumpInfoRgctxEntry *
3458 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3460 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3461 res->method = method;
3462 res->in_mrgctx = in_mrgctx;
3463 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3464 res->data->type = patch_type;
3465 res->data->data.target = patch_data;
3466 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the rgctx lazy-fetch trampoline that resolves ENTRY
 * against the runtime generic context RGCTX.
 */
3471 static inline MonoInst*
3472 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3474 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit IR loading the RGCTX_TYPE info for KLASS via an rgctx fetch on the
 * current method's runtime generic context.
 */
3478 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3479 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3481 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3482 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3484 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 * Emit IR loading the RGCTX_TYPE info for the signature SIG via an rgctx
 * fetch on the current method's runtime generic context.
 */
3488 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3489 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3491 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3492 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3494 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 * Emit IR loading the RGCTX_TYPE info for a gsharedvt call described by
 * (SIG, CMETHOD), via an rgctx fetch on the current method's runtime
 * generic context.
 */
3498 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3499 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3501 MonoJumpInfoGSharedVtCall *call_info;
3502 MonoJumpInfoRgctxEntry *entry;
3505 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3506 call_info->sig = sig;
3507 call_info->method = cmethod;
3509 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3510 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3512 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 * Emit IR loading the gsharedvt info descriptor INFO for CMETHOD
 * (MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO) via an rgctx fetch.
 */
3517 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3518 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3520 MonoJumpInfoRgctxEntry *entry;
3523 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3524 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3526 return emit_rgctx_fetch (cfg, rgctx, entry);
3530 * emit_get_rgctx_method:
3532 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3533 * normal constants, else emit a load from the rgctx.
3536 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3537 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3539 if (!context_used) {
/* Non-shared case: the method (or its rgctx) can be emitted as a
 * compile-time constant. */
3542 switch (rgctx_type) {
3543 case MONO_RGCTX_INFO_METHOD:
3544 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3546 case MONO_RGCTX_INFO_METHOD_RGCTX:
3547 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3550 g_assert_not_reached ();
3553 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3554 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3556 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Emit IR loading the RGCTX_TYPE info for FIELD via an rgctx fetch on the
 * current method's runtime generic context.
 */
3561 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3562 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3564 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3565 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3567 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the (RGCTX_TYPE, DATA) entry inside the gsharedvt info
 * template table of the current method, registering a new entry if none exists.
 */
3571 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3573 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3574 MonoRuntimeGenericContextInfoTemplate *template;
/* Linear search for an existing matching entry; LOCAL_OFFSET entries are never shared */
3579 for (i = 0; i < info->num_entries; ++i) {
3580 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3582 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array: double the capacity, starting at 16.
 * The old array is mempool-allocated, so it is simply abandoned. */
3586 if (info->num_entries == info->count_entries) {
3587 MonoRuntimeGenericContextInfoTemplate *new_entries;
3588 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3590 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3592 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3593 info->entries = new_entries;
3594 info->count_entries = new_count_entries;
/* Append the new template at the end and return its index */
3597 idx = info->num_entries;
3598 template = &info->entries [idx];
3599 template->info_type = rgctx_type;
3600 template->data = data;
3602 info->num_entries ++;
3608 * emit_get_gsharedvt_info:
3610 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/*
 * emit_get_gsharedvt_info:
 *
 *   Emit IR to load entry (DATA, RGCTX_TYPE) from the gsharedvt info variable
 * of the current method instead of going through an rgctx fetch trampoline.
 */
3613 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or find) the slot for this entry in the template table */
3618 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3619 /* Load info->entries [idx] */
3620 dreg = alloc_preg (cfg);
3621 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper around emit_get_gsharedvt_info () keyed by the
 * byval MonoType of KLASS.
 */
3627 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3629 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3633 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing the
 * vtable either through the rgctx (shared code) or as a constant.
 */
3636 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3638 MonoInst *vtable_arg;
3642 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable has to be looked up at runtime */
3645 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3646 klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared: the vtable is known at JIT time */
3648 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3652 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a different helper signature for the same trampoline */
3655 if (COMPILE_LLVM (cfg))
3656 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3658 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3659 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated arch register when the arch defines one */
3660 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3661 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for debugger support. Only emitted
 * when sequence points are enabled and METHOD is the method being compiled
 * (i.e. not for inlined code).
 */
3668 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3672 if (cfg->gen_seq_points && cfg->method == method) {
3673 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
/* Mark seq points where the IL evaluation stack is not empty */
3675 ins->flags |= MONO_INST_NONEMPTY_STACK;
3676 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit code which records the source and
 * target classes of a cast into the JIT TLS data, so a failing cast can
 * produce a detailed error message. No-op otherwise.
 * OUT_BBLOCK, if non-NULL, receives the current basic block on return.
 */
3681 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3683 if (mini_get_debug_options ()->better_cast_details) {
3684 int vtable_reg = alloc_preg (cfg);
3685 int klass_reg = alloc_preg (cfg);
3686 MonoBasicBlock *is_null_bb = NULL;
3688 int to_klass_reg, context_used;
/* Optionally skip the bookkeeping when the object is null */
3691 NEW_BBLOCK (cfg, is_null_bb);
3693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3694 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3697 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic is arch-dependent; without it this feature is unavailable */
3699 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3703 MONO_ADD_INS (cfg->cbb, tls_get);
/* Record the runtime class of the object being cast (class_cast_from) */
3704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3705 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* Record the target class (class_cast_to); in shared code it comes from the rgctx */
3709 context_used = mini_class_check_context_used (cfg, klass);
3711 MonoInst *class_ins;
3713 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3714 to_klass_reg = class_ins->dreg;
3716 to_klass_reg = alloc_preg (cfg);
3717 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3722 MONO_START_BB (cfg, is_null_bb);
3724 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state saved by save_cast_details () once the cast
 * has succeeded. No-op unless --debug=casts is enabled.
 */
3730 reset_cast_details (MonoCompile *cfg)
3732 /* Reset the variables holding the cast details */
3733 if (mini_get_debug_options ()->better_cast_details) {
3734 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3736 MONO_ADD_INS (cfg->cbb, tls_get);
3737 /* It is enough to reset the from field */
3738 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3743 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit code which checks that OBJ is exactly an instance of ARRAY_CLASS and
 * throws ArrayTypeMismatchException otherwise (used for covariant array
 * stores). The comparison strategy depends on the compilation mode: class
 * compare under MONO_OPT_SHARED, rgctx vtable compare in shared generic code,
 * direct vtable compare otherwise.
 */
3746 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3748 int vtable_reg = alloc_preg (cfg);
3751 context_used = mini_class_check_context_used (cfg, array_class);
3753 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Loading the vtable also acts as the null check (fault on null) */
3755 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Domain-shared code cannot compare vtables (they are per-domain), compare classes instead */
3757 if (cfg->opt & MONO_OPT_SHARED) {
3758 int class_reg = alloc_preg (cfg);
3759 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3760 if (cfg->compile_aot) {
/* AOT: the class pointer must come from a patchable constant */
3761 int klass_reg = alloc_preg (cfg);
3762 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3763 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3767 } else if (context_used) {
/* Shared generic code: look the expected vtable up through the rgctx */
3768 MonoInst *vtable_ins;
3770 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3771 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3773 if (cfg->compile_aot) {
3777 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3779 vt_reg = alloc_preg (cfg);
3780 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3781 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3784 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3786 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch in the comparisons above raises the exception */
3790 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3792 reset_cast_details (cfg);
3796 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3797 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () to unbox VAL. In shared generic code
 * the method address is obtained through the rgctx and called indirectly;
 * otherwise a direct call is made, optionally passing the vtable.
 */
3800 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3802 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3805 MonoInst *rgctx, *addr;
3807 /* FIXME: What if the class is shared? We might not
3808 have to get the address of the method from the
3810 addr = emit_get_rgctx_method (cfg, context_used, method,
3811 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3813 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
/* Indirect call through the resolved address */
3815 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3817 gboolean pass_vtable, pass_mrgctx;
3818 MonoInst *rgctx_arg = NULL;
/* Determine whether Unbox () expects an extra vtable/mrgctx argument */
3820 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3821 g_assert (!pass_mrgctx);
3824 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3827 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3830 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the type check part of an 'unbox' of SP [0] to KLASS and return an
 * instruction holding the address of the unboxed value (object + header size).
 * Throws InvalidCastException at runtime on type mismatch.
 */
3835 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3839 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3840 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3841 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3842 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3844 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
3845 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3846 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3848 /* FIXME: generics */
3849 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a non-array class */
3852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3853 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3856 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class from the rgctx */
3859 MonoInst *element_class;
3861 /* This assertion is from the unboxcast insn */
3862 g_assert (klass->rank == 0);
3864 element_class = emit_get_rgctx_klass (cfg, context_used,
3865 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3867 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3868 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: the element class is known at JIT time */
3870 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3871 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3872 reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header, i.e. the boxed payload */
3875 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3876 MONO_ADD_INS (cfg->cbb, add);
3877 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit an 'unbox' of OBJ when KLASS is a gsharedvt type whose concrete kind
 * (value type, reference type, or Nullable<T>) is only known at runtime.
 * Branches on the runtime CLASS_BOX_TYPE info and produces the address of the
 * unboxed value in all three cases. OUT_CBB receives the final basic block.
 */
3884 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3886 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3887 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3891 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime cast check before the unbox */
3897 args [1] = klass_inst;
3900 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3902 NEW_BBLOCK (cfg, is_ref_bb);
3903 NEW_BBLOCK (cfg, is_nullable_bb);
3904 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box kind: 1 == reference type, 2 == Nullable<T> */
3905 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3906 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3907 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3910 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3912 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3913 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Value type case: the payload lives right after the object header */
3917 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3918 MONO_ADD_INS (cfg->cbb, addr);
3920 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3923 MONO_START_BB (cfg, is_ref_bb);
3925 /* Save the ref to a temporary */
3926 dreg = alloc_ireg (cfg);
3927 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3928 addr->dreg = addr_reg;
3929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3933 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through an rgctx-resolved address;
 * the signature cannot be constructed normally at JIT time, so it is built by hand */
3936 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3937 MonoInst *unbox_call;
3938 MonoMethodSignature *unbox_sig;
3940 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3941 unbox_sig->ret = &klass->byval_arg;
3942 unbox_sig->param_count = 1;
3943 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3944 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3946 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3947 addr->dreg = addr_reg;
3950 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3953 MONO_START_BB (cfg, end_bb);
/* All paths converge: load the value through the computed address */
3956 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3958 *out_cbb = cfg->cbb;
3964 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit code allocating an object of type KLASS (FOR_BOX distinguishes box
 * allocations). Picks between managed GC allocators, domain-shared helpers,
 * an mscorlib-specialized helper (AOT out-of-line paths) and the generic
 * allocation functions. Returns NULL and sets the cfg exception on error.
 */
3967 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3969 MonoInst *iargs [2];
3975 MonoInst *iargs [2];
/* gsharedvt classes have no JIT-time-known instance size */
3976 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3978 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* Shared code looks the class/vtable up through the rgctx */
3980 if (cfg->opt & MONO_OPT_SHARED)
3981 rgctx_info = MONO_RGCTX_INFO_KLASS;
3983 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3984 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3986 if (cfg->opt & MONO_OPT_SHARED) {
3987 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3989 alloc_ftn = mono_object_new;
3992 alloc_ftn = mono_object_new_specific;
/* Fast path: inline managed allocator (not usable with domain sharing) */
3995 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3996 if (known_instance_size) {
3997 int size = mono_class_instance_size (klass);
3999 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4001 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4004 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4007 if (cfg->opt & MONO_OPT_SHARED) {
4008 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4009 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4011 alloc_ftn = mono_object_new;
4012 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4013 /* This happens often in argument checking code, eg. throw new FooException... */
4014 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4015 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4016 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4018 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4019 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a type-load error to the caller */
4023 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4024 cfg->exception_ptr = klass;
4028 #ifndef MONO_CROSS_COMPILE
4029 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4032 if (managed_alloc) {
4033 int size = mono_class_instance_size (klass);
4035 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4036 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4037 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4039 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation fns take the size in machine words as the first argument */
4041 guint32 lw = vtable->klass->instance_size;
4042 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4043 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4044 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4047 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4051 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4055 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit code boxing VAL into an object of type KLASS. Handles Nullable<T>
 * (calls Nullable<T>.Box), gsharedvt types (runtime dispatch on the box kind)
 * and the plain value-type case (allocate + store). OUT_CBB receives the
 * current basic block on return. Returns NULL and sets the cfg exception on
 * error.
 */
4058 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4060 MonoInst *alloc, *ins;
4062 *out_cbb = cfg->cbb;
4064 if (mono_class_is_nullable (klass)) {
4065 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4068 /* FIXME: What if the class is shared? We might not
4069 have to get the method address from the RGCTX. */
/* Shared code: call Box () indirectly through an rgctx-resolved address */
4070 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4071 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4072 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4074 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4076 gboolean pass_vtable, pass_mrgctx;
4077 MonoInst *rgctx_arg = NULL;
4079 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4080 g_assert (!pass_mrgctx);
4083 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4086 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4089 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: the box kind is only known at runtime, branch on CLASS_BOX_TYPE */
4093 if (mini_is_gsharedvt_klass (cfg, klass)) {
4094 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4095 MonoInst *res, *is_ref, *src_var, *addr;
4098 dreg = alloc_ireg (cfg);
4100 NEW_BBLOCK (cfg, is_ref_bb);
4101 NEW_BBLOCK (cfg, is_nullable_bb);
4102 NEW_BBLOCK (cfg, end_bb);
/* 1 == reference type, 2 == Nullable<T>, otherwise a plain value type */
4103 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4104 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4105 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4108 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Value type case: allocate and copy the value past the object header */
4111 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4114 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4115 ins->opcode = OP_STOREV_MEMBASE;
4117 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4118 res->type = STACK_OBJ;
4120 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4123 MONO_START_BB (cfg, is_ref_bb);
4125 /* val is a vtype, so has to load the value manually */
4126 src_var = get_vreg_to_inst (cfg, val->dreg);
4128 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4129 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4130 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4131 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4134 MONO_START_BB (cfg, is_nullable_bb);
4137 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4138 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4140 MonoMethodSignature *box_sig;
4143 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4144 * construct that method at JIT time, so have to do things by hand.
4146 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4147 box_sig->ret = &mono_defaults.object_class->byval_arg;
4148 box_sig->param_count = 1;
4149 box_sig->params [0] = &klass->byval_arg;
4150 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4151 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4152 res->type = STACK_OBJ;
4156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4158 MONO_START_BB (cfg, end_bb);
4160 *out_cbb = cfg->cbb;
/* Plain (non-shared, non-nullable) box: allocate + store the value */
4164 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4168 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) with at least one covariant/contravariant type argument that is a
 * reference type — such casts need the full variance-aware check.
 */
4175 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4178 MonoGenericContainer *container;
4179 MonoGenericInst *ginst;
4181 if (klass->generic_class) {
/* Closed generic instance: inspect its actual type arguments */
4182 container = klass->generic_class->container_class->generic_container;
4183 ginst = klass->generic_class->context.class_inst;
4184 } else if (klass->generic_container && context_used) {
/* Open generic in shared code: inspect the generic parameters themselves */
4185 container = klass->generic_container;
4186 ginst = container->context.class_inst;
4191 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters matter */
4193 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4195 type = ginst->type_argv [i];
4196 if (mini_type_is_reference (cfg, type))
/*
 * True when an isinst/castclass against KLASS cannot be decided by a simple
 * vtable/class compare: interfaces, arrays, Nullable<T>, MarshalByRef types,
 * sealed classes and generic type variables all need the slower checks.
 * NOTE: function-like macro evaluates 'klass' multiple times — only pass
 * side-effect-free expressions.
 */
4202 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper with ARGS
 * (obj, klass, cache), recording cast details around the call for
 * --debug=casts. OUT_BBLOCK receives the current basic block on return.
 */
4205 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4207 MonoMethod *mono_castclass;
4210 mono_castclass = mono_marshal_get_castclass_with_cache ();
4212 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4213 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4214 reset_cast_details (cfg);
4215 *out_bblock = cfg->cbb;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared variant of the cached castclass: KLASS is a JIT-time constant
 * and the cache slot is either an AOT patch (unique per call site) or a
 * domain-allocated pointer.
 */
4221 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4230 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4233 if (cfg->compile_aot) {
4234 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4235 cfg->castclass_cache_index ++;
4236 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4237 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
/* JIT: the cache is just a zero-initialized pointer in the domain pool */
4239 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4242 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4244 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4248 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit code implementing the 'castclass' opcode: check that SRC is an
 * instance of KLASS and throw InvalidCastException otherwise. Chooses between
 * the cached-cast wrapper, an inlined marshal wrapper (interfaces/MBR), and
 * inline vtable/class checks. Returns NULL and sets the cfg exception on error.
 */
4251 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4253 MonoBasicBlock *is_null_bb;
4254 int obj_reg = src->dreg;
4255 int vtable_reg = alloc_preg (cfg);
4257 MonoInst *klass_inst = NULL, *res;
4258 MonoBasicBlock *bblock;
4262 context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic interfaces need the full variance-aware cached check */
4264 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4265 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4266 (*inline_costs) += 2;
/* Interfaces and MarshalByRef types: inline the castclass marshal wrapper */
4269 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4270 MonoMethod *mono_castclass;
4271 MonoInst *iargs [1];
4274 mono_castclass = mono_marshal_get_castclass (klass);
4277 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4278 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4279 iargs, ip, cfg->real_offset, TRUE, &bblock);
4280 reset_cast_details (cfg);
4281 CHECK_CFG_EXCEPTION;
4282 g_assert (costs > 0);
4284 cfg->real_offset += 5;
4286 (*inline_costs) += costs;
/* Shared code path: complex casts go through the cached-cast helper */
4295 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4296 MonoInst *cache_ins;
4298 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4303 /* klass - it's the second element of the cache entry*/
4304 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4307 args [2] = cache_ins;
4309 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4312 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: a null object always passes the cast */
4315 NEW_BBLOCK (cfg, is_null_bb);
4317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4318 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4320 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4322 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4323 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4324 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4326 int klass_reg = alloc_preg (cfg);
4328 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow a single direct class/vtable compare */
4330 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4331 /* the remoting code is broken, access the class for now */
4332 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4333 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4335 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4336 cfg->exception_ptr = klass;
4339 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4341 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4342 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4344 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
4346 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4347 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4351 MONO_START_BB (cfg, is_null_bb);
4353 reset_cast_details (cfg);
4364 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit code implementing the 'isinst' opcode: the result register holds SRC
 * if it is an instance of KLASS, NULL otherwise. Complex casts are routed
 * through the cached isinst wrapper; the rest is emitted inline with a branch
 * structure (is_null_bb = success, false_bb = failure).
 */
4367 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4370 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4371 int obj_reg = src->dreg;
4372 int vtable_reg = alloc_preg (cfg);
4373 int res_reg = alloc_ireg_ref (cfg);
4374 MonoInst *klass_inst = NULL;
/* Complex casts: call the isinst-with-cache marshal wrapper */
4379 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4380 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4381 MonoInst *cache_ins;
4383 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4388 /* klass - it's the second element of the cache entry*/
4389 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4392 args [2] = cache_ins;
4394 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4397 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4400 NEW_BBLOCK (cfg, is_null_bb);
4401 NEW_BBLOCK (cfg, false_bb);
4402 NEW_BBLOCK (cfg, end_bb);
4404 /* Do the assignment at the beginning, so the other assignment can be if converted */
4405 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4406 ins->type = STACK_OBJ;
/* null input yields the input itself (which is null) */
4409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4410 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4412 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4414 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4415 g_assert (!context_used);
4416 /* the is_null_bb target simply copies the input register to the output */
4417 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4419 int klass_reg = alloc_preg (cfg);
/* Array case: compare ranks, then element classes, with special cases for
 * object/enum element types to implement array covariance rules */
4422 int rank_reg = alloc_preg (cfg);
4423 int eclass_reg = alloc_preg (cfg);
4425 g_assert (!context_used);
4426 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4427 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4428 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4431 if (klass->cast_class == mono_defaults.object_class) {
4432 int parent_reg = alloc_preg (cfg);
4433 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4434 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4435 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4436 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4437 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4438 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4439 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4440 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4441 } else if (klass->cast_class == mono_defaults.enum_class) {
4442 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4443 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4444 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4445 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4447 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4448 /* Check that the object is a vector too */
4449 int bounds_reg = alloc_preg (cfg);
4450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4455 /* the is_null_bb target simply copies the input register to the output */
4456 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4458 } else if (mono_class_is_nullable (klass)) {
4459 g_assert (!context_used);
4460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4461 /* the is_null_bb target simply copies the input register to the output */
4462 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed classes: a single class compare suffices */
4464 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4465 g_assert (!context_used);
4466 /* the remoting code is broken, access the class for now */
4467 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4468 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4470 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4471 cfg->exception_ptr = klass;
4474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4479 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4480 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4483 /* the is_null_bb target simply copies the input register to the output */
4484 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result is NULL */
4489 MONO_START_BB (cfg, false_bb);
4491 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4494 MONO_START_BB (cfg, is_null_bb);
4496 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit code for the OP_CISINST opcode (isinst with transparent-proxy
 * awareness). Produces an integer result instead of an object reference; see
 * the comment below for the 0/1/2 encoding. Without remoting support the
 * proxy paths are compiled out.
 */
4502 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4504 /* This opcode takes as input an object reference and a class, and returns:
4505 0) if the object is an instance of the class,
4506 1) if the object is not instance of the class,
4507 2) if the object is a proxy whose type cannot be determined */
4510 #ifndef DISABLE_REMOTING
4511 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4513 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4515 int obj_reg = src->dreg;
4516 int dreg = alloc_ireg (cfg);
4518 #ifndef DISABLE_REMOTING
4519 int klass_reg = alloc_preg (cfg);
4522 NEW_BBLOCK (cfg, true_bb);
4523 NEW_BBLOCK (cfg, false_bb);
4524 NEW_BBLOCK (cfg, end_bb);
4525 #ifndef DISABLE_REMOTING
4526 NEW_BBLOCK (cfg, false2_bb);
4527 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance */
4530 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4531 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4533 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4534 #ifndef DISABLE_REMOTING
4535 NEW_BBLOCK (cfg, interface_fail_bb);
4538 tmp_reg = alloc_preg (cfg);
4539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4540 #ifndef DISABLE_REMOTING
/* Interface check failed: the object may still be a proxy with unknown type */
4541 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4542 MONO_START_BB (cfg, interface_fail_bb);
4543 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4545 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4547 tmp_reg = alloc_preg (cfg);
/* A proxy without custom type info cannot answer the question -> result 2 */
4548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4550 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4552 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4555 #ifndef DISABLE_REMOTING
/* Non-interface case: distinguish proxies from ordinary objects first */
4556 tmp_reg = alloc_preg (cfg);
4557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4560 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4561 tmp_reg = alloc_preg (cfg);
/* For proxies, check against the remote class' proxy_class */
4562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4563 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4565 tmp_reg = alloc_preg (cfg);
4566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4567 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4570 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4571 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4573 MONO_START_BB (cfg, no_proxy_bb);
4575 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4577 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result encoding: false_bb -> 1, false2_bb -> 2, true_bb -> 0 */
4581 MONO_START_BB (cfg, false_bb);
4583 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4584 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4586 #ifndef DISABLE_REMOTING
4587 MONO_START_BB (cfg, false2_bb);
4589 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4590 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4593 MONO_START_BB (cfg, true_bb);
4595 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4597 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 constant instruction */
4600 MONO_INST_NEW (cfg, ins, OP_ICONST);
4602 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass opcode. Takes an object
 * reference SRC and a class KLASS and produces an integer result:
 *   0) if the object is an instance of the class,
 *   1) if the object is a proxy whose type cannot be determined;
 * an InvalidCastException is thrown otherwise.
 * NOTE(review): this extract elides structural lines (braces, #else/#endif,
 * declarations such as 'ins'); the #ifndef DISABLE_REMOTING regions below
 * rely on elided #else/#endif counterparts.
 */
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
	/* This opcode takes as input an object reference and a class, and returns:
	   0) if the object is an instance of the class,
	   1) if the object is a proxy whose type cannot be determined
	   an InvalidCastException exception is thrown otherwise */
#ifndef DISABLE_REMOTING
	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	/* Without remoting only the success join block is needed */
	MonoBasicBlock *ok_result_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);		/* integer result register (0 or 1) */
	int tmp_reg = alloc_preg (cfg);
#ifndef DISABLE_REMOTING
	int klass_reg = alloc_preg (cfg);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	/* A null reference casts successfully: result 0 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	/* Record cast details so a failure can report a descriptive exception */
	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifndef DISABLE_REMOTING
		NEW_BBLOCK (cfg, interface_fail_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		/* Fast path: interface bitmap check against the vtable */
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));

		/* Interface check failed: only a transparent proxy may still pass */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		/* A proxy without custom type info cannot satisfy the cast */
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		/* Proxy whose type cannot be determined here: result 1 */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* NOTE(review): an elided #else (DISABLE_REMOTING) likely separates
		 * the block above from this one — here a failed iface cast throws. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);

	/* Non-interface class */
#ifndef DISABLE_REMOTING
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
		/* Not a transparent proxy: take the plain castclass path */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* Transparent proxy: test against the remote class' proxy_class */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		/* Proxy with no custom type info behaves like a normal object */
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		/* Proxy whose type cannot be determined: result 1 */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* Ordinary object: a failing castclass throws InvalidCastException */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);

		/* Remoting compiled out (elided #else branch) */
		g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");

	MONO_START_BB (cfg, ok_result_bb);

	/* Successful cast: result 0 */
	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

#ifndef DISABLE_REMOTING
	MONO_START_BB (cfg, end_bb);

	/* Materialize the result value on the IL evaluation stack */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inline implementation of Enum.HasFlag () for enum KLASS:
 * load the enum value through ENUM_THIS, AND it with ENUM_FLAG and
 * compare the result against the flag, i.e. ((value & flag) == flag).
 * NOTE(review): the switch cases that set 'is_i4' are elided in this
 * extract; 'is_i4' selects 32-bit vs 64-bit opcodes below.
 */
static G_GNUC_UNUSED MonoInst*
handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
	MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
	guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);

	/* Pick operand width from the enum's underlying integral type */
	switch (enum_type->type) {
#if SIZEOF_REGISTER == 8
		MonoInst *load, *and, *cmp, *ceq;
		int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
		int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
		int dest_reg = alloc_ireg (cfg);

		/* load = *enum_this; and = load & flag; ceq = (and == flag) */
		EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
		EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
		EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
		EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);

		/* The result is a boolean (I4) on the IL stack */
		ceq->type = STACK_I4;

		/* Decompose the composite opcodes immediately; the back ends do not
		 * handle them directly. */
		load = mono_decompose_opcode (cfg, load, NULL);
		and = mono_decompose_opcode (cfg, and, NULL);
		cmp = mono_decompose_opcode (cfg, cmp, NULL);
		ceq = mono_decompose_opcode (cfg, ceq, NULL);
/*
 * handle_delegate_ctor:
 *
 *   Emit IR for a delegate constructor, inlining the work normally done
 * by mono_delegate_ctor (): allocate the delegate object, store its
 * target, method, per-domain code slot and invoke trampoline.
 *
 * Returns NULL and set the cfg exception on error.
 * NOTE(review): several brace/#else/return lines are elided in this
 * extract (e.g. the virtual vs. non-virtual trampoline branches).
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
	gpointer trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;

	// FIXME reenable optimisation for virtual case
	MonoMethod *invoke = mono_get_delegate_invoke (klass);

	/* Only usable when a virtual invoke impl exists for this signature */
	if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))

	/* Allocate the delegate object itself */
	obj = handle_alloc (cfg, klass, FALSE, 0);

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
		if (cfg->gen_write_barriers) {
			/* The target store writes a GC reference into the heap */
			dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
			emit_write_barrier (cfg, ptr, target);

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);

	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
		MonoInst *code_slot_ins;

		code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);

		/* Lazily create the per-domain method -> code-slot hash, under the
		 * domain lock since other threads may be JITting concurrently. */
		domain = mono_domain_get ();
		mono_domain_lock (domain);
		if (!domain_jit_info (domain)->method_code_hash)
			domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
		code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
		/* (elided: only allocate + insert when the lookup misses) */
		code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
		g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
		mono_domain_unlock (domain);

		/* AOT code references the slot via a patch; JIT embeds the address */
		if (cfg->compile_aot)
			EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);

	if (cfg->compile_aot) {
		MonoDelegateClassMethodPair *del_tramp;

		/* AOT: the trampoline is resolved at method-load time */
		del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
		del_tramp->klass = klass;
		del_tramp->method = context_used ? NULL : method;
		del_tramp->virtual = virtual;
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);

		/* JIT: create the trampoline now (virtual vs. plain — the selecting
		 * branch lines are elided) and embed its address as a constant */
		trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
		trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);

	/* Set invoke_impl field */
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* Non-AOT path: copy invoke_impl/method_ptr out of the tramp info */
	dreg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);

	dreg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the RANK-specific mono_array_new_va icall for a
 * multi-dimensional array allocation; SP holds the dimension arguments.
 */
handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
	MonoJitICallInfo *info;

	/* Need to register the icall so it gets an icall wrapper */
	info = mono_get_array_new_va_icall (rank);

	cfg->flags |= MONO_CFG_HAS_VARARGS;

	/* mono_array_new_va () needs a vararg calling convention */
	cfg->disable_llvm = TRUE;

	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * handle_constrained_gsharedvt_call:
 *
 *   Handle constrained calls where the receiver is a gsharedvt type.
 * Return the instruction representing the call. Set the cfg exception on failure.
 * NOTE(review): several brace/else lines are elided in this extract.
 */
handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
								   gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
	MonoInst *ins = NULL;
	MonoBasicBlock *bblock = *ref_bblock;
	gboolean emit_widen = *ref_emit_widen;

	/*
	 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
	 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
	 * pack the arguments into an array, and do the rest of the work in an icall.
	 */
	if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
		(MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
		(fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
		MonoInst *args [16];

		/*
		 * This case handles calls to
		 * - object:ToString()/Equals()/GetHashCode(),
		 * - System.IComparable<T>:CompareTo()
		 * - System.IEquatable<T>:Equals ()
		 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
		 */

		/* args [1] = the target method, via rgctx when generic context is used */
		if (mono_method_check_context_used (cmethod))
			args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
			EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
		args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);

		/* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
		if (fsig->hasthis && fsig->param_count) {
			/* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
			MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
			ins->dreg = alloc_preg (cfg);
			ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
			MONO_ADD_INS (cfg->cbb, ins);

			if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
				/* Gsharedvt argument: pass its boxing class and address */
				args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);

				EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
				addr_reg = ins->dreg;
				EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
				/* Reference/byref argument: store the value directly */
				EMIT_NEW_ICONST (cfg, args [3], 0);
				EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
			/* No arguments to marshal */
			EMIT_NEW_ICONST (cfg, args [3], 0);
			EMIT_NEW_ICONST (cfg, args [4], 0);

		ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);

		/* The icall returns a boxed result; unbox it as required */
		if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
			ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
		} else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
			/* Load the value out of the box (object header is skipped) */
			NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
			MONO_ADD_INS (cfg->cbb, add);

			NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
			MONO_ADD_INS (cfg->cbb, ins);
			/* ins represents the call result */

		/* Unsupported shape for the packed-argument fallback */
		GSHAREDVT_FAILURE (CEE_CALLVIRT);

	*ref_emit_widen = emit_widen;
	*ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that initializes cfg->got_var at
 * the very start of the method, and a dummy use that keeps the variable
 * alive for the whole method body. No-op when there is no got_var or it
 * has already been allocated.
 */
mono_emit_load_got_addr (MonoCompile *cfg)
	MonoInst *getaddr, *dummy_use;

	if (!cfg->got_var || cfg->got_var_allocated)

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->cil_code = cfg->header->code;
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
		MONO_ADD_INS (cfg->bb_entry, getaddr);

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit, read once from MONO_INLINELIMIT (or INLINE_LENGTH_LIMIT) */
static int inline_limit;
static gboolean inline_limit_inited;

/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Checks JIT flags, method attributes, body size against the inline limit,
 * class-initialization constraints, declarative security and (on soft-float
 * targets) R4 usage.
 * NOTE(review): the early 'return FALSE/TRUE' lines and some braces are
 * elided in this extract — each guard below falls through to an elided
 * return.
 */
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
	MonoMethodHeaderSummary header;

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	MonoMethodSignature *sig = mono_method_signature (method);

	/* Explicitly disabled, generic sharing, or too deep an inline chain */
	if (cfg->disable_inline)
	if (cfg->generic_sharing_context)
	if (cfg->inline_depth > 10)

#ifdef MONO_ARCH_HAVE_LMF_OPS
	/* Small icalls/pinvokes can be "inlined" via LMF ops on some archs */
	if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
		 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
		!MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))

	/* Fails for methods without a real IL body */
	if (!mono_method_get_header_summary (method, &header))

	/*runtime, icall and pinvoke are checked by summary call*/
	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
		(method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
		(mono_class_is_marshalbyref (method->klass)) ||

	/* also consider num_locals? */
	/* Do the size check early to avoid creating vtables */
	if (!inline_limit_inited) {
		if (g_getenv ("MONO_INLINELIMIT"))
			inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
			inline_limit = INLINE_LENGTH_LIMIT;
		inline_limit_inited = TRUE;
	/* AggressiveInlining overrides the size limit */
	if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))

	/*
	 * if we can initialize the class of the method right away, we do,
	 * otherwise we don't allow inlining if the class needs initialization,
	 * since it would mean inserting a call to mono_runtime_class_init()
	 * inside the inlined code
	 */
	if (!(cfg->opt & MONO_OPT_SHARED)) {
		/* The AggressiveInlining hint is a good excuse to force that cctor to run. */
		if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
			vtable = mono_class_vtable (cfg->domain, method->klass);
			if (!cfg->compile_aot)
				mono_runtime_class_init (vtable);
		} else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
			if (cfg->run_cctors && method->klass->has_cctor) {
				/* FIXME: it would be easier and lazier to just use mono_class_try_get_vtable */
				if (!method->klass->runtime_info)
					/* No vtable created yet */
				vtable = mono_class_vtable (cfg->domain, method->klass);
				/* This makes so that inline cannot trigger */
				/* .cctors: too many apps depend on them */
				/* running with a specific order... */
				if (! vtable->initialized)
				mono_runtime_class_init (vtable);
		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
			if (!method->klass->runtime_info)
				/* No vtable created yet */
			vtable = mono_class_vtable (cfg->domain, method->klass);
			if (!vtable->initialized)
		/*
		 * If we're compiling for shared code
		 * the cctor will need to be run at aot method load time, for example,
		 * or at the end of the compilation of the inlining method.
		 */
		if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))

	/*
	 * CAS - do not inline methods with declarative security
	 * Note: this has to be before any possible return TRUE;
	 */
	if (mono_security_method_has_declsec (method))

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	if (mono_arch_is_soft_float ()) {
		/* An R4 result or parameter forces the soft-float decomposition path */
		if (sig->ret && sig->ret->type == MONO_TYPE_R4)
		for (i = 0; i < sig->param_count; ++i)
			if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)

	/* Callers can blacklist methods (e.g. to break inline recursion) */
	if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access from METHOD to a field of KLASS
 * requires emitting a class-initialization (cctor) call first.
 * NOTE(review): the individual return statements are elided in this extract.
 */
mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
	if (!cfg->compile_aot) {
		/* When JITting, an already-initialized vtable needs no cctor call */
		if (vtable->initialized)

	if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
		/* beforefieldinit: only the declaring method itself may skip it */
		if (cfg->method == method)

	if (!mono_class_needs_cctor_run (klass, method))

	if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
		/* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the 1-dimensional
 * array ARR with element class KLASS. Performs a bounds check when BCHECK
 * is set. Returns a STACK_MP instruction holding the element address.
 */
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	/* Variable-size (gsharedvt) elements need the size from the rgctx */
	if (mini_is_gsharedvt_variable_klass (cfg, klass)) {

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		index2_reg = index_reg;
		/* Sign-extend the 32-bit index before the address arithmetic */
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	/* 32-bit: truncate an I8 index down to I4 */
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
		index2_reg = index_reg;

	MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Power-of-two element sizes fit a single LEA on x86/amd64 */
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
		ins->klass = mono_class_get_element_class (klass);
		ins->type = STACK_MP;

	add_reg = alloc_ireg_mp (cfg);

		MonoInst *rgctx_ins;

		/* gsharedvt element size is only known at runtime */
		g_assert (cfg->generic_sharing_context);
		context_used = mini_class_check_context_used (cfg, klass);
		g_assert (context_used);
		rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
		MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	/* addr = array + index * size + offsetof (MonoArray, vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
	ins->klass = mono_class_get_element_class (klass);
	ins->type = STACK_MP;
	MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX1, INDEX2] of a
 * 2-dimensional array of element class KLASS, including per-dimension
 * lower-bound adjustment and range checks against the bounds array.
 */
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_ireg_mp (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);
	int index1, index2, tmpreg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* Sign-extend both 32-bit indexes to pointer width */
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
		tmpreg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));

	/* dimension 1: realidx = index - lower_bound; require realidx < length */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* dimension 2: the second MonoArrayBounds entry follows the first */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + ((realidx1 * length2) + realidx2) * size + vector offset */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;

	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR for the Address () accessor of a multi-dimensional array:
 * fast inline paths for rank 1 (and rank 2 when OP_LMUL is available),
 * otherwise a call to the marshalling array-address wrapper.
 */
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
	MonoMethod *addr_method;
	MonoClass *eclass = cmethod->klass->element_class;

	/* Setters carry the value as an extra trailing parameter */
	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);

	/* rank 1: inline address computation with bounds check */
		return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);

#ifndef MONO_ARCH_EMULATE_MUL_DIV
	/* emit_ldelema_2 depends on OP_LMUL */
	if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
		return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);

	/* gsharedvt variable-size elements cannot use the generic wrapper */
	if (mini_is_gsharedvt_variable_klass (cfg, eclass))

	/* General case: call the generated array-address wrapper */
	element_size = mono_class_array_element_size (eclass);
	addr_method = mono_marshal_get_array_address (rank, element_size);
	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break instruction / Debugger.Break (). */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
	return MONO_BREAK_POLICY_ALWAYS;
/* Current break-policy callback; defaults to always inserting breakpoints. */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;

/*
 * mono_set_break_policy:
 * policy_callback: the new callback function
 *
 * Allow embedders to decide whether to actually obey breakpoint instructions
 * (both break IL instructions and Debugger.Break () method calls), for example
 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
 * untrusted or semi-trusted code.
 *
 * @policy_callback will be called every time a break point instruction needs to
 * be inserted with the method argument being the method that calls Debugger.Break()
 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
 * if it wants the breakpoint to not be effective in the given method.
 * #MONO_BREAK_POLICY_ALWAYS is the default.
 */
mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
	if (policy_callback)
		break_policy_func = policy_callback;
		/* NULL resets to the default policy */
		break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — historical name)
 *
 *   Consult the embedder-installed break policy to decide whether a
 * breakpoint should actually be emitted for METHOD.
 * NOTE(review): the per-case return statements are elided in this extract.
 */
should_insert_brekpoint (MonoMethod *method) {
	switch (break_policy_func (method)) {
	case MONO_BREAK_POLICY_ALWAYS:
	case MONO_BREAK_POLICY_NEVER:
	case MONO_BREAK_POLICY_ON_DBG:
		/* mdb-specific policy no longer has a backing debugger */
		g_warning ("mdb no longer supported");
		g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = value location.
 * When IS_SET, copy the value into the array element; otherwise copy the
 * element out into the value location.
 * NOTE(review): the if (is_set)/else braces are elided in this extract.
 */
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
		/* set path: *element = *value */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
		/* storing a reference into the array requires a write barrier */
		if (mini_type_is_reference (cfg, fsig->params [2]))
			emit_write_barrier (cfg, addr, load);
		/* get path: *value = *element */
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type in the current (possibly shared) context. */
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
	return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for a STELEM: sp [0] = array, sp [1] = index, sp [2] = value.
 * Reference-type stores with SAFETY_CHECKS go through the virtual
 * stelemref helper (which performs the array covariance check); other
 * stores compute the element address and store directly.
 */
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
		!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
		/* Covariance-checked store via the virtual stelemref wrapper */
		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
		MonoInst *iargs [3];

		mono_class_setup_vtable (obj_array);
		g_assert (helper->slot);

		/* Both array and value must be object references on the stack */
		if (sp [0]->type != STACK_OBJ)
		if (sp [2]->type != STACK_OBJ)

		return mono_emit_method_call (cfg, helper, iargs, sp [0]);

	if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
		/* gsharedvt: element size unknown at compile time */
		// FIXME-VT: OP_ICONST optimization
		addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
		ins->opcode = OP_STOREV_MEMBASE;
	} else if (sp [1]->opcode == OP_ICONST) {
		/* Constant index: fold the offset at compile time */
		int array_reg = sp [0]->dreg;
		int index_reg = sp [1]->dreg;
		int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);

		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
		MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
		/* storing a reference into the heap requires a write barrier */
		if (generic_class_is_reference_type (cfg, klass))
			emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Implement Array.UnsafeStore/UnsafeLoad: element access without the
 * type/covariance safety checks. The element class comes from the value
 * parameter for stores and from the return type for loads.
 */
emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
		eklass = mono_class_from_mono_type (fsig->params [2]);
		eklass = mono_class_from_mono_type (fsig->ret);

	/* store path: reuse the array-store emitter with checks disabled */
		return emit_array_store (cfg, eklass, args, FALSE);
		/* load path: compute the element address and load from it */
		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5488 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5492 //Only allow for valuetypes
5493 if (!param_klass->valuetype || !return_klass->valuetype)
5497 if (param_klass->has_references || return_klass->has_references)
5500 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5501 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5502 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5505 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5506 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5509 //And have the same size
5510 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Implement Array.UnsafeMov: a no-op reinterpret of args [0] when the
 * parameter and return classes are layout-compatible valuetypes (or
 * 1-dimensional arrays thereof).
 */
emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
	MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
	MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);

	//Valuetypes that are semantically equivalent
	if (is_unsafe_mov_compatible (param_klass, return_klass))

	//Arrays of valuetypes that are semantically equivalent
	if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a call to constructor CMETHOD with intrinsic IR
 * (SIMD or native-types intrinsics). Returns NULL when no intrinsic applies.
 */
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
#ifdef MONO_ARCH_SIMD_INTRINSICS
	MonoInst *ins = NULL;

	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);

	return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Emit an OP_MEMORY_BARRIER of the given KIND (one of the
 * MONO_MEMORY_BARRIER_* memory-model kinds) into the current bblock.
 */
emit_memory_barrier (MonoCompile *cfg, int kind)
	MonoInst *ins = NULL;
	MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
	MONO_ADD_INS (cfg->cbb, ins);
	ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   LLVM-backend intrinsics for System.Math: map Sin/Cos/Sqrt/Abs to
 * single float opcodes, and Min/Max (with MONO_OPT_CMOV) to conditional
 * move opcodes. Returns the emitted instruction, or NULL when CMETHOD is
 * not recognized.
 * NOTE(review): the opcode assignments inside the strcmp branches are
 * elided in this extract.
 */
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
	MonoInst *ins = NULL;

	/* The LLVM backend supports these intrinsics */
	if (cmethod->klass == mono_defaults.math_class) {
		if (strcmp (cmethod->name, "Sin") == 0) {
		} else if (strcmp (cmethod->name, "Cos") == 0) {
		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {

		/* Unary R8 intrinsic: one freg in, one freg out */
		if (opcode && fsig->param_count == 1) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = STACK_R8;
			ins->dreg = mono_alloc_freg (cfg);
			ins->sreg1 = args [0]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);

		if (cfg->opt & MONO_OPT_CMOV) {
			/* Min/Max map to signed/unsigned, 32/64-bit cmov opcodes */
			if (strcmp (cmethod->name, "Min") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMIN_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMIN_UN;
			} else if (strcmp (cmethod->name, "Max") == 0) {
				if (fsig->params [0]->type == MONO_TYPE_I4)
				if (fsig->params [0]->type == MONO_TYPE_U4)
					opcode = OP_IMAX_UN;
				else if (fsig->params [0]->type == MONO_TYPE_I8)
				else if (fsig->params [0]->type == MONO_TYPE_U8)
					opcode = OP_LMAX_UN;

		/* Binary integer intrinsic: two iregs in, one ireg out */
		if (opcode && fsig->param_count == 2) {
			MONO_INST_NEW (cfg, ins, opcode);
			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
			ins->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = args [0]->dreg;
			ins->sreg2 = args [1]->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to use even in shared (generic-sharing)
 * code: the System.Array UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 * Returns the emitted instruction or falls through (presumably to
 * return NULL) when the method is not one of these.
 */
5622 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5624 if (cmethod->klass == mono_defaults.array_class) {
5625 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5626 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5627 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5628 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5629 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5630 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Central intrinsics dispatcher: if CMETHOD is one of a known set of
 * BCL methods (String, Object, Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Volatile, Debugger, Environment, ObjC Selector), emit
 * inline IR for the call instead of a real call and return the last
 * emitted instruction; otherwise fall through to SIMD / native-types /
 * LLVM / arch-specific intrinsics.
 * NOTE(review): many interior lines are elided in this excerpt (variable
 * declarations such as 'opcode', early 'return ins;' statements, #else /
 * #endif lines); comments below describe only what the visible code shows.
 */
5637 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5639 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
5641 static MonoClass *runtime_helpers_class = NULL;
5642 if (! runtime_helpers_class)
5643 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5644 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics: get_Chars, get_Length, InternalSetChar --- */
5646 if (cmethod->klass == mono_defaults.string_class) {
5647 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5648 int dreg = alloc_ireg (cfg);
5649 int index_reg = alloc_preg (cfg);
5650 int add_reg = alloc_preg (cfg);
5652 #if SIZEOF_REGISTER == 8
5653 /* The array reg is 64 bits but the index reg is only 32 */
5654 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5656 index_reg = args [1]->dreg;
5658 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5660 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: fold base + index*2 + offset into a single LEA. */
5661 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5662 add_reg = ins->dreg;
5663 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Other targets: compute chars + index*2 manually (shift then add). */
5666 int mult_reg = alloc_preg (cfg);
5667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5668 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5669 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5670 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5672 type_from_op (cfg, ins, NULL, NULL);
5674 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5675 int dreg = alloc_ireg (cfg);
5676 /* Decompose later to allow more optimizations */
5677 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5678 ins->type = STACK_I4;
5679 ins->flags |= MONO_INST_FAULT;
5680 cfg->cbb->has_array_access = TRUE;
5681 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5684 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5685 int mult_reg = alloc_preg (cfg);
5686 int add_reg = alloc_preg (cfg);
5688 /* The corlib functions check for oob already. */
5689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5690 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5692 return cfg->cbb->last_ins;
/* --- System.Object intrinsics: GetType, InternalGetHashCode, empty .ctor --- */
5695 } else if (cmethod->klass == mono_defaults.object_class) {
5697 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5698 int dreg = alloc_ireg_ref (cfg);
5699 int vt_reg = alloc_preg (cfg);
/* obj->vtable->type, with a null-check fault on the vtable load. */
5700 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5701 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5702 type_from_op (cfg, ins, NULL, NULL);
5705 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash only valid with a non-moving GC. */
5706 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5707 int dreg = alloc_ireg (cfg);
5708 int t1 = alloc_ireg (cfg);
5710 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
/* 2654435761 is the golden-ratio multiplicative hash constant. */
5711 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5712 ins->type = STACK_I4;
5716 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
/* Object..ctor() is empty: replace the call with a NOP. */
5717 MONO_INST_NEW (cfg, ins, OP_NOP);
5718 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
5722 } else if (cmethod->klass == mono_defaults.array_class) {
5723 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5724 return emit_array_generic_access (cfg, fsig, args, FALSE);
5725 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5726 return emit_array_generic_access (cfg, fsig, args, TRUE);
5728 #ifndef MONO_BIG_ARRAYS
5730 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5733 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5734 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5735 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5736 int dreg = alloc_ireg (cfg);
5737 int bounds_reg = alloc_ireg_mp (cfg);
5738 MonoBasicBlock *end_bb, *szarray_bb;
5739 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5741 NEW_BBLOCK (cfg, end_bb);
5742 NEW_BBLOCK (cfg, szarray_bb);
/* Branch on whether the array has an explicit bounds record
 * (NULL bounds == single-dimension zero-based "szarray"). */
5744 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5745 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5746 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5747 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5748 /* Non-szarray case */
5750 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5751 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5753 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5754 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5755 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5756 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length, GetLowerBound(0) is 0. */
5759 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5760 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5762 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5763 MONO_START_BB (cfg, end_bb);
5765 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5766 ins->type = STACK_I4;
/* Quick reject: the remaining Array intrinsics all start with 'g'. */
5772 if (cmethod->name [0] != 'g')
5775 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5776 int dreg = alloc_ireg (cfg);
5777 int vtable_reg = alloc_preg (cfg);
5778 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5779 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5780 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5781 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5782 type_from_op (cfg, ins, NULL, NULL);
5785 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5786 int dreg = alloc_ireg (cfg);
5788 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5789 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5790 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers.get_OffsetToStringData --- */
5795 } else if (cmethod->klass == runtime_helpers_class) {
5797 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5798 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread intrinsics --- */
5802 } else if (cmethod->klass == mono_defaults.thread_class) {
5803 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5804 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5805 MONO_ADD_INS (cfg->cbb, ins);
5807 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5808 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* VolatileRead: plain load, then an acquire barrier (see 5879). */
5809 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5811 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5813 if (fsig->params [0]->type == MONO_TYPE_I1)
5814 opcode = OP_LOADI1_MEMBASE;
5815 else if (fsig->params [0]->type == MONO_TYPE_U1)
5816 opcode = OP_LOADU1_MEMBASE;
5817 else if (fsig->params [0]->type == MONO_TYPE_I2)
5818 opcode = OP_LOADI2_MEMBASE;
5819 else if (fsig->params [0]->type == MONO_TYPE_U2)
5820 opcode = OP_LOADU2_MEMBASE;
5821 else if (fsig->params [0]->type == MONO_TYPE_I4)
5822 opcode = OP_LOADI4_MEMBASE;
5823 else if (fsig->params [0]->type == MONO_TYPE_U4)
5824 opcode = OP_LOADU4_MEMBASE;
5825 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5826 opcode = OP_LOADI8_MEMBASE;
5827 else if (fsig->params [0]->type == MONO_TYPE_R4)
5828 opcode = OP_LOADR4_MEMBASE;
5829 else if (fsig->params [0]->type == MONO_TYPE_R8)
5830 opcode = OP_LOADR8_MEMBASE;
5831 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5832 opcode = OP_LOAD_MEMBASE;
5835 MONO_INST_NEW (cfg, ins, opcode);
5836 ins->inst_basereg = args [0]->dreg;
5837 ins->inst_offset = 0;
5838 MONO_ADD_INS (cfg->cbb, ins);
/* Pick dreg class and stack type from the element type. */
5840 switch (fsig->params [0]->type) {
5847 ins->dreg = mono_alloc_ireg (cfg);
5848 ins->type = STACK_I4;
5852 ins->dreg = mono_alloc_lreg (cfg);
5853 ins->type = STACK_I8;
5857 ins->dreg = mono_alloc_ireg (cfg);
5858 #if SIZEOF_REGISTER == 8
5859 ins->type = STACK_I8;
5861 ins->type = STACK_I4;
5866 ins->dreg = mono_alloc_freg (cfg);
5867 ins->type = STACK_R8;
5870 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5871 ins->dreg = mono_alloc_ireg_ref (cfg);
5872 ins->type = STACK_OBJ;
/* 64-bit loads may need decomposition on 32-bit targets. */
5876 if (opcode == OP_LOADI8_MEMBASE)
5877 ins = mono_decompose_opcode (cfg, ins, NULL);
5879 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* VolatileWrite: a release barrier (5903), then a plain store. */
5883 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5885 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5887 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5888 opcode = OP_STOREI1_MEMBASE_REG;
5889 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5890 opcode = OP_STOREI2_MEMBASE_REG;
5891 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5892 opcode = OP_STOREI4_MEMBASE_REG;
5893 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5894 opcode = OP_STOREI8_MEMBASE_REG;
5895 else if (fsig->params [0]->type == MONO_TYPE_R4)
5896 opcode = OP_STORER4_MEMBASE_REG;
5897 else if (fsig->params [0]->type == MONO_TYPE_R8)
5898 opcode = OP_STORER8_MEMBASE_REG;
5899 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5900 opcode = OP_STORE_MEMBASE_REG;
5903 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5905 MONO_INST_NEW (cfg, ins, opcode);
5906 ins->sreg1 = args [1]->dreg;
5907 ins->inst_destbasereg = args [0]->dreg;
5908 ins->inst_offset = 0;
5909 MONO_ADD_INS (cfg->cbb, ins);
5911 if (opcode == OP_STOREI8_MEMBASE_REG)
5912 ins = mono_decompose_opcode (cfg, ins, NULL);
/* --- System.Threading.Monitor fastpaths (arch-dependent trampolines) --- */
5917 } else if (cmethod->klass == mono_defaults.monitor_class) {
5918 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5919 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5922 if (COMPILE_LLVM (cfg)) {
5924 * Pass the argument normally, the LLVM backend will handle the
5925 * calling convention problems.
5927 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: the object is passed in a fixed register, not via args. */
5929 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5930 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5931 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5932 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5935 return (MonoInst*)call;
5936 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Monitor.Enter(object, ref bool lockTaken) — the .NET 4 overload. */
5937 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5940 if (COMPILE_LLVM (cfg)) {
5942 * Pass the argument normally, the LLVM backend will handle the
5943 * calling convention problems.
5945 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5947 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5948 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5949 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5950 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5953 return (MonoInst*)call;
5955 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5958 if (COMPILE_LLVM (cfg)) {
5959 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5961 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5962 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5963 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5964 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5967 return (MonoInst*)call;
/* --- System.Threading.Interlocked intrinsics --- */
5970 } else if (cmethod->klass->image == mono_defaults.corlib &&
5971 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5972 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5975 #if SIZEOF_REGISTER == 8
/* Interlocked.Read(ref long): prefer a true atomic load. */
5976 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5977 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5978 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5979 ins->dreg = mono_alloc_preg (cfg);
5980 ins->sreg1 = args [0]->dreg;
5981 ins->type = STACK_I8;
5982 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5983 MONO_ADD_INS (cfg->cbb, ins);
/* Fallback: barrier + plain load + barrier (loads are atomic on 64-bit). */
5987 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5989 /* 64 bit reads are already atomic */
5990 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5991 load_ins->dreg = mono_alloc_preg (cfg);
5992 load_ins->inst_basereg = args [0]->dreg;
5993 load_ins->inst_offset = 0;
5994 load_ins->type = STACK_I8;
5995 MONO_ADD_INS (cfg->cbb, load_ins);
5997 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment/Decrement lower to ATOMIC_ADD with a +/-1 constant operand. */
6004 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6005 MonoInst *ins_iconst;
6008 if (fsig->params [0]->type == MONO_TYPE_I4) {
6009 opcode = OP_ATOMIC_ADD_I4;
6010 cfg->has_atomic_add_i4 = TRUE;
6012 #if SIZEOF_REGISTER == 8
6013 else if (fsig->params [0]->type == MONO_TYPE_I8)
6014 opcode = OP_ATOMIC_ADD_I8;
6017 if (!mono_arch_opcode_supported (opcode))
6019 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6020 ins_iconst->inst_c0 = 1;
6021 ins_iconst->dreg = mono_alloc_ireg (cfg);
6022 MONO_ADD_INS (cfg->cbb, ins_iconst);
6024 MONO_INST_NEW (cfg, ins, opcode);
6025 ins->dreg = mono_alloc_ireg (cfg);
6026 ins->inst_basereg = args [0]->dreg;
6027 ins->inst_offset = 0;
6028 ins->sreg2 = ins_iconst->dreg;
6029 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6030 MONO_ADD_INS (cfg->cbb, ins);
6032 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6033 MonoInst *ins_iconst;
6036 if (fsig->params [0]->type == MONO_TYPE_I4) {
6037 opcode = OP_ATOMIC_ADD_I4;
6038 cfg->has_atomic_add_i4 = TRUE;
6040 #if SIZEOF_REGISTER == 8
6041 else if (fsig->params [0]->type == MONO_TYPE_I8)
6042 opcode = OP_ATOMIC_ADD_I8;
6045 if (!mono_arch_opcode_supported (opcode))
6047 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6048 ins_iconst->inst_c0 = -1;
6049 ins_iconst->dreg = mono_alloc_ireg (cfg);
6050 MONO_ADD_INS (cfg->cbb, ins_iconst);
6052 MONO_INST_NEW (cfg, ins, opcode);
6053 ins->dreg = mono_alloc_ireg (cfg);
6054 ins->inst_basereg = args [0]->dreg;
6055 ins->inst_offset = 0;
6056 ins->sreg2 = ins_iconst->dreg;
6057 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6058 MONO_ADD_INS (cfg->cbb, ins);
6060 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6063 if (fsig->params [0]->type == MONO_TYPE_I4) {
6064 opcode = OP_ATOMIC_ADD_I4;
6065 cfg->has_atomic_add_i4 = TRUE;
6067 #if SIZEOF_REGISTER == 8
6068 else if (fsig->params [0]->type == MONO_TYPE_I8)
6069 opcode = OP_ATOMIC_ADD_I8;
6072 if (!mono_arch_opcode_supported (opcode))
6074 MONO_INST_NEW (cfg, ins, opcode);
6075 ins->dreg = mono_alloc_ireg (cfg);
6076 ins->inst_basereg = args [0]->dreg;
6077 ins->inst_offset = 0;
6078 ins->sreg2 = args [1]->dreg;
6079 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6080 MONO_ADD_INS (cfg->cbb, ins);
/* Interlocked.Exchange: floats are round-tripped through int registers
 * via MOVE_F_TO_I*/MOVE_I*_TO_F since the CAS/XCHG ops are integer-only. */
6083 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6084 MonoInst *f2i = NULL, *i2f;
6085 guint32 opcode, f2i_opcode, i2f_opcode;
6086 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6087 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6089 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6090 fsig->params [0]->type == MONO_TYPE_R4) {
6091 opcode = OP_ATOMIC_EXCHANGE_I4;
6092 f2i_opcode = OP_MOVE_F_TO_I4;
6093 i2f_opcode = OP_MOVE_I4_TO_F;
6094 cfg->has_atomic_exchange_i4 = TRUE;
6096 #if SIZEOF_REGISTER == 8
6098 fsig->params [0]->type == MONO_TYPE_I8 ||
6099 fsig->params [0]->type == MONO_TYPE_R8 ||
6100 fsig->params [0]->type == MONO_TYPE_I) {
6101 opcode = OP_ATOMIC_EXCHANGE_I8;
6102 f2i_opcode = OP_MOVE_F_TO_I8;
6103 i2f_opcode = OP_MOVE_I8_TO_F;
6106 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6107 opcode = OP_ATOMIC_EXCHANGE_I4;
6108 cfg->has_atomic_exchange_i4 = TRUE;
6114 if (!mono_arch_opcode_supported (opcode))
6118 /* TODO: Decompose these opcodes instead of bailing here. */
6119 if (COMPILE_SOFT_FLOAT (cfg))
6122 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6123 f2i->dreg = mono_alloc_ireg (cfg);
6124 f2i->sreg1 = args [1]->dreg;
6125 if (f2i_opcode == OP_MOVE_F_TO_I4)
6126 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6127 MONO_ADD_INS (cfg->cbb, f2i);
6130 MONO_INST_NEW (cfg, ins, opcode);
6131 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6132 ins->inst_basereg = args [0]->dreg;
6133 ins->inst_offset = 0;
6134 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6135 MONO_ADD_INS (cfg->cbb, ins);
6137 switch (fsig->params [0]->type) {
6139 ins->type = STACK_I4;
6142 ins->type = STACK_I8;
6145 #if SIZEOF_REGISTER == 8
6146 ins->type = STACK_I8;
6148 ins->type = STACK_I4;
6153 ins->type = STACK_R8;
6156 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6157 ins->type = STACK_OBJ;
/* Float case: move the exchanged integer bits back into an FP register. */
6162 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6163 i2f->dreg = mono_alloc_freg (cfg);
6164 i2f->sreg1 = ins->dreg;
6165 i2f->type = STACK_R8;
6166 if (i2f_opcode == OP_MOVE_I4_TO_F)
6167 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6168 MONO_ADD_INS (cfg->cbb, i2f);
/* A reference was stored into the location: record it for the GC. */
6173 if (cfg->gen_write_barriers && is_ref)
6174 emit_write_barrier (cfg, args [0], args [1]);
/* Interlocked.CompareExchange(ref T, T value, T comparand) — same
 * float round-trip scheme as Exchange, keyed on params [1]. */
6176 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6177 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6178 guint32 opcode, f2i_opcode, i2f_opcode;
6179 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6180 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6182 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6183 fsig->params [1]->type == MONO_TYPE_R4) {
6184 opcode = OP_ATOMIC_CAS_I4;
6185 f2i_opcode = OP_MOVE_F_TO_I4;
6186 i2f_opcode = OP_MOVE_I4_TO_F;
6187 cfg->has_atomic_cas_i4 = TRUE;
6189 #if SIZEOF_REGISTER == 8
6191 fsig->params [1]->type == MONO_TYPE_I8 ||
6192 fsig->params [1]->type == MONO_TYPE_R8 ||
6193 fsig->params [1]->type == MONO_TYPE_I) {
6194 opcode = OP_ATOMIC_CAS_I8;
6195 f2i_opcode = OP_MOVE_F_TO_I8;
6196 i2f_opcode = OP_MOVE_I8_TO_F;
6199 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6200 opcode = OP_ATOMIC_CAS_I4;
6201 cfg->has_atomic_cas_i4 = TRUE;
6207 if (!mono_arch_opcode_supported (opcode))
6211 /* TODO: Decompose these opcodes instead of bailing here. */
6212 if (COMPILE_SOFT_FLOAT (cfg))
6215 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6216 f2i_new->dreg = mono_alloc_ireg (cfg);
6217 f2i_new->sreg1 = args [1]->dreg;
6218 if (f2i_opcode == OP_MOVE_F_TO_I4)
6219 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6220 MONO_ADD_INS (cfg->cbb, f2i_new);
6222 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6223 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6224 f2i_cmp->sreg1 = args [2]->dreg;
6225 if (f2i_opcode == OP_MOVE_F_TO_I4)
6226 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6227 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6230 MONO_INST_NEW (cfg, ins, opcode);
6231 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6232 ins->sreg1 = args [0]->dreg;
6233 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6234 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6235 MONO_ADD_INS (cfg->cbb, ins);
6237 switch (fsig->params [1]->type) {
6239 ins->type = STACK_I4;
6242 ins->type = STACK_I8;
6245 #if SIZEOF_REGISTER == 8
6246 ins->type = STACK_I8;
6248 ins->type = STACK_I4;
6253 ins->type = STACK_R8;
6256 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6257 ins->type = STACK_OBJ;
6262 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6263 i2f->dreg = mono_alloc_freg (cfg);
6264 i2f->sreg1 = ins->dreg;
6265 i2f->type = STACK_R8;
6266 if (i2f_opcode == OP_MOVE_I4_TO_F)
6267 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6268 MONO_ADD_INS (cfg->cbb, i2f);
6273 if (cfg->gen_write_barriers && is_ref)
6274 emit_write_barrier (cfg, args [0], args [1]);
/* Internal 4-arg CompareExchange(ref int, int, int, out bool success). */
6276 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6277 fsig->params [1]->type == MONO_TYPE_I4) {
6278 MonoInst *cmp, *ceq;
6280 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6283 /* int32 r = CAS (location, value, comparand); */
6284 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6285 ins->dreg = alloc_ireg (cfg);
6286 ins->sreg1 = args [0]->dreg;
6287 ins->sreg2 = args [1]->dreg;
6288 ins->sreg3 = args [2]->dreg;
6289 ins->type = STACK_I4;
6290 MONO_ADD_INS (cfg->cbb, ins);
6292 /* bool result = r == comparand; */
6293 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6294 cmp->sreg1 = ins->dreg;
6295 cmp->sreg2 = args [2]->dreg;
6296 cmp->type = STACK_I4;
6297 MONO_ADD_INS (cfg->cbb, cmp);
6299 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6300 ceq->dreg = alloc_ireg (cfg);
6301 ceq->type = STACK_I4;
6302 MONO_ADD_INS (cfg->cbb, ceq);
6304 /* *success = result; */
6305 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6307 cfg->has_atomic_cas_i4 = TRUE;
6309 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6310 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile Read/Write: true acquire/release atomics --- */
6314 } else if (cmethod->klass->image == mono_defaults.corlib &&
6315 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6316 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6319 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6321 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6322 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6324 if (fsig->params [0]->type == MONO_TYPE_I1)
6325 opcode = OP_ATOMIC_LOAD_I1;
6326 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6327 opcode = OP_ATOMIC_LOAD_U1;
6328 else if (fsig->params [0]->type == MONO_TYPE_I2)
6329 opcode = OP_ATOMIC_LOAD_I2;
6330 else if (fsig->params [0]->type == MONO_TYPE_U2)
6331 opcode = OP_ATOMIC_LOAD_U2;
6332 else if (fsig->params [0]->type == MONO_TYPE_I4)
6333 opcode = OP_ATOMIC_LOAD_I4;
6334 else if (fsig->params [0]->type == MONO_TYPE_U4)
6335 opcode = OP_ATOMIC_LOAD_U4;
6336 else if (fsig->params [0]->type == MONO_TYPE_R4)
6337 opcode = OP_ATOMIC_LOAD_R4;
6338 else if (fsig->params [0]->type == MONO_TYPE_R8)
6339 opcode = OP_ATOMIC_LOAD_R8;
6340 #if SIZEOF_REGISTER == 8
6341 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6342 opcode = OP_ATOMIC_LOAD_I8;
6343 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6344 opcode = OP_ATOMIC_LOAD_U8;
/* 32-bit targets: native-int and references are 4 bytes wide. */
6346 else if (fsig->params [0]->type == MONO_TYPE_I)
6347 opcode = OP_ATOMIC_LOAD_I4;
6348 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6349 opcode = OP_ATOMIC_LOAD_U4;
6353 if (!mono_arch_opcode_supported (opcode))
6356 MONO_INST_NEW (cfg, ins, opcode);
6357 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6358 ins->sreg1 = args [0]->dreg;
6359 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6360 MONO_ADD_INS (cfg->cbb, ins);
6362 switch (fsig->params [0]->type) {
6363 case MONO_TYPE_BOOLEAN:
6370 ins->type = STACK_I4;
6374 ins->type = STACK_I8;
6378 #if SIZEOF_REGISTER == 8
6379 ins->type = STACK_I8;
6381 ins->type = STACK_I4;
6386 ins->type = STACK_R8;
6389 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6390 ins->type = STACK_OBJ;
6396 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6398 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6400 if (fsig->params [0]->type == MONO_TYPE_I1)
6401 opcode = OP_ATOMIC_STORE_I1;
6402 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6403 opcode = OP_ATOMIC_STORE_U1;
6404 else if (fsig->params [0]->type == MONO_TYPE_I2)
6405 opcode = OP_ATOMIC_STORE_I2;
6406 else if (fsig->params [0]->type == MONO_TYPE_U2)
6407 opcode = OP_ATOMIC_STORE_U2;
6408 else if (fsig->params [0]->type == MONO_TYPE_I4)
6409 opcode = OP_ATOMIC_STORE_I4;
6410 else if (fsig->params [0]->type == MONO_TYPE_U4)
6411 opcode = OP_ATOMIC_STORE_U4;
6412 else if (fsig->params [0]->type == MONO_TYPE_R4)
6413 opcode = OP_ATOMIC_STORE_R4;
6414 else if (fsig->params [0]->type == MONO_TYPE_R8)
6415 opcode = OP_ATOMIC_STORE_R8;
6416 #if SIZEOF_REGISTER == 8
6417 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6418 opcode = OP_ATOMIC_STORE_I8;
6419 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6420 opcode = OP_ATOMIC_STORE_U8;
6422 else if (fsig->params [0]->type == MONO_TYPE_I)
6423 opcode = OP_ATOMIC_STORE_I4;
6424 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6425 opcode = OP_ATOMIC_STORE_U4;
6429 if (!mono_arch_opcode_supported (opcode))
6432 MONO_INST_NEW (cfg, ins, opcode);
6433 ins->dreg = args [0]->dreg;
6434 ins->sreg1 = args [1]->dreg;
6435 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6436 MONO_ADD_INS (cfg->cbb, ins);
6438 if (cfg->gen_write_barriers && is_ref)
6439 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger.Break --- */
6445 } else if (cmethod->klass->image == mono_defaults.corlib &&
6446 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6447 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6448 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6449 if (should_insert_brekpoint (cfg->method)) {
6450 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6452 MONO_INST_NEW (cfg, ins, OP_NOP);
6453 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment.get_IsRunningOnWindows: compile-time constant --- */
6457 } else if (cmethod->klass->image == mono_defaults.corlib &&
6458 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6459 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6460 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6462 EMIT_NEW_ICONST (cfg, ins, 1);
6464 EMIT_NEW_ICONST (cfg, ins, 0);
6467 } else if (cmethod->klass == mono_defaults.math_class) {
6469 * There is general branchless code for Min/Max, but it does not work for
6471 * http://everything2.com/?node_id=1051618
/* --- MonoMac/monotouch Selector.GetHandle: fold a literal selector string
 * into an OP_OBJC_GET_SELECTOR so AOT images can intern it. --- */
6473 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6474 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6475 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6476 !strcmp (cmethod->klass->name, "Selector")) {
6477 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
/* NOTE(review): this compares cmethod->klass->name against "GetHandle",
 * but the enclosing branch already requires klass->name == "Selector",
 * so the condition can never be true — almost certainly should be
 * cmethod->name (fixed in later upstream Mono). Flagging only; not
 * changed here. */
6478 if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
6479 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6482 MonoJumpInfoToken *ji;
6485 cfg->disable_llvm = TRUE;
/* Recover the ldstr token from either the GOT entry or the AOT const. */
6487 if (args [0]->opcode == OP_GOT_ENTRY) {
6488 pi = args [0]->inst_p1;
6489 g_assert (pi->opcode == OP_PATCH_INFO);
6490 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6493 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6494 ji = args [0]->inst_p0;
/* The original ldstr is dead once folded into the selector op. */
6497 NULLIFY_INS (args [0]);
6500 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6501 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6502 ins->dreg = mono_alloc_ireg (cfg);
6504 ins->inst_p0 = mono_string_to_utf8 (s);
6505 MONO_ADD_INS (cfg->cbb, ins);
/* Nothing matched above: try SIMD, native-types, LLVM-only, then
 * arch-specific intrinsics, in that order. */
6511 #ifdef MONO_ARCH_SIMD_INTRINSICS
6512 if (cfg->opt & MONO_OPT_SIMD) {
6513 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6519 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6523 if (COMPILE_LLVM (cfg)) {
6524 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6529 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6533 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a different (managed) implementation.
 * Currently handles only String.InternalAllocateStr, which is rewritten
 * to the managed GC allocator when allocation profiling and shared-code
 * mode are both off. Returns the replacement call instruction, or
 * (presumably) NULL when no redirection applies — the trailing return is
 * elided in this excerpt.
 */
6536 inline static MonoInst*
6537 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6538 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6540 if (method->klass == mono_defaults.string_class) {
6541 /* managed string allocation support */
/* Profiler allocation events and MONO_OPT_SHARED both require the
 * unredirected icall path, so bail out in those modes. */
6542 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6543 MonoInst *iargs [2];
6544 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6545 MonoMethod *managed_alloc = NULL;
6547 g_assert (vtable); /* Should not fail since it is System.String */
6548 #ifndef MONO_CROSS_COMPILE
6549 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call managed_alloc (vtable, length) in place of the icall. */
6553 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6554 iargs [1] = args [0];
6555 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Used when inlining: create an OP_LOCAL variable for each incoming
 * argument (including 'this' when SIG->hasthis) and store the
 * corresponding stack value SP [i] into it, so the inlined body reads
 * its arguments through cfg->args like a normal method.
 */
6562 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6564 MonoInst *store, *temp;
6567 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params; derive its type from the stack. */
6568 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6571 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6572 * would be different than the MonoInst's used to represent arguments, and
6573 * the ldelema implementation can't deal with that.
6574 * Solution: When ldelema is used on an inline argument, create a var for
6575 * it, emit ldelema on that var, and emit the saving code below in
6576 * inline_method () if needed.
6578 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6579 cfg->args [i] = temp;
6580 /* This uses cfg->args [i] which is set by the preceding line */
6581 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6582 store->cil_code = sp [0]->cil_code;
/* Debug knobs: when set to 1, inlining can be restricted by the environment
 * variables read in the two checker functions below. */
6587 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6588 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6590 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * Debugging aid: only allow inlining of callees whose full name starts with
 * the prefix given in MONO_INLINE_CALLED_METHOD_NAME_LIMIT. The env var is
 * read once and cached in a function-local static (not thread-safe, but this
 * is a debug-only facility).
 */
6592 check_inline_called_method_name_limit (MonoMethod *called_method)
6595 static const char *limit = NULL;
6597 if (limit == NULL) {
6598 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6600 if (limit_string != NULL)
6601 limit = limit_string;
6606 if (limit [0] != '\0') {
6607 char *called_method_name = mono_method_full_name (called_method, TRUE);
6609 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6610 g_free (called_method_name);
6612 //return (strncmp_result <= 0);
6613 return (strncmp_result == 0);
6620 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * Debugging aid, mirror of check_inline_called_method_name_limit: only
 * allow inlining inside callers whose full name starts with the prefix
 * given in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
6622 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6625 static const char *limit = NULL;
6627 if (limit == NULL) {
6628 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6629 if (limit_string != NULL) {
6630 limit = limit_string;
6636 if (limit [0] != '\0') {
6637 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6639 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6640 g_free (caller_method_name);
6642 //return (strncmp_result <= 0);
6643 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 * Emit IR that initializes the vreg DREG to the zero value of RTYPE:
 * NULL for reference/pointer types, 0 for integer types, 0.0 for floats
 * (r4 kept as R4 only when cfg->r4fp is set), and VZERO for value types
 * (including generic instances and gshared type variables that are VTs).
 */
6651 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* The float/double constants must outlive the compile, hence static. */
6653 static double r8_0 = 0.0;
6654 static float r4_0 = 0.0;
6658 rtype = mini_get_underlying_type (cfg, rtype);
6662 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6663 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6664 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6665 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6666 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6667 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6668 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6669 ins->type = STACK_R4;
6670 ins->inst_p0 = (void*)&r4_0;
6672 MONO_ADD_INS (cfg->cbb, ins);
6673 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6674 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6675 ins->type = STACK_R8;
6676 ins->inst_p0 = (void*)&r8_0;
6678 MONO_ADD_INS (cfg->cbb, ins);
6679 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6680 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6681 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6682 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6683 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a reference/pointer. */
6685 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 * Like emit_init_rvar, but emits OP_DUMMY_* placeholder initializations:
 * they keep the IR/SSA well-formed (the vreg has a definition) without
 * generating any actual code. Falls back to a real emit_init_rvar for
 * types with no dummy opcode.
 */
6690 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6694 rtype = mini_get_underlying_type (cfg, rtype);
6698 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6699 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6700 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6701 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6702 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6703 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6704 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6705 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6706 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6707 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6708 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6709 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6710 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6711 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero init instead. */
6713 emit_init_rvar (cfg, dreg, rtype);
/*
 * emit_init_local:
 *
 * Initialize local variable LOCAL of type TYPE. If INIT is TRUE, emit a
 * real zero-initialization; otherwise emit dummy initialization statements
 * that merely keep the IR valid. Under soft-float, the init value is built
 * in a fresh vreg and then stored to the local so the store goes through
 * the normal (decomposable) LOCSTORE path.
 */
6717 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6719 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6721 MonoInst *var = cfg->locals [local];
6722 if (COMPILE_SOFT_FLOAT (cfg)) {
6724 int reg = alloc_dreg (cfg, var->type);
6725 emit_init_rvar (cfg, reg, type);
6726 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6729 emit_init_rvar (cfg, var->dreg, type);
6731 emit_dummy_init_rvar (cfg, var->dreg, type);
6738 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 * Try to inline CMETHOD at IP by recursively running mono_method_to_ir on
 * it between freshly allocated start/end bblocks. Most of the body is
 * save/restore bookkeeping: the cfg fields that describe the method being
 * compiled (locals, args, cil offsets, generic context, ...) are swapped to
 * the callee's for the recursive call and restored afterwards. On success
 * (cost within limit, or inline_always) the new bblocks are linked and
 * merged into the caller's graph and *out_cbb receives the current bblock;
 * on failure the cbb is reset so the speculative bblocks are abandoned.
 * Returns the inlining cost (NOTE(review): the zero/negative return paths
 * are in elided lines — confirm against the full source).
 */
6741 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6742 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6744 MonoInst *ins, *rvar = NULL;
6745 MonoMethodHeader *cheader;
6746 MonoBasicBlock *ebblock, *sbblock;
6748 MonoMethod *prev_inlined_method;
6749 MonoInst **prev_locals, **prev_args;
6750 MonoType **prev_arg_types;
6751 guint prev_real_offset;
6752 GHashTable *prev_cbb_hash;
6753 MonoBasicBlock **prev_cil_offset_to_bb;
6754 MonoBasicBlock *prev_cbb;
6755 unsigned char* prev_cil_start;
6756 guint32 prev_cil_offset_to_bb_len;
6757 MonoMethod *prev_current_method;
6758 MonoGenericContext *prev_generic_context;
6759 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6761 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug-only name filters (see the checkers above). */
6763 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6764 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6767 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6768 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6773 fsig = mono_method_signature (cmethod);
6775 if (cfg->verbose_level > 2)
6776 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct method as inlineable only once. */
6778 if (!cmethod->inline_info) {
6779 cfg->stat_inlineable_methods++;
6780 cmethod->inline_info = 1;
6783 /* allocate local variables */
6784 cheader = mono_method_get_header (cmethod);
6786 if (cheader == NULL || mono_loader_get_last_error ()) {
6787 MonoLoaderError *error = mono_loader_get_last_error ();
6790 mono_metadata_free_mh (cheader);
6791 if (inline_always && error)
6792 mono_cfg_set_exception (cfg, error->exception_type);
6794 mono_loader_clear_error ();
6798 /*Must verify before creating locals as it can cause the JIT to assert.*/
6799 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6800 mono_metadata_free_mh (cheader);
6804 /* allocate space to store the return value */
6805 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6806 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Switch cfg->locals to the callee's locals for the recursive pass. */
6809 prev_locals = cfg->locals;
6810 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6811 for (i = 0; i < cheader->num_locals; ++i)
6812 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6814 /* allocate start and end blocks */
6815 /* This is needed so if the inline is aborted, we can clean up */
6816 NEW_BBLOCK (cfg, sbblock);
6817 sbblock->real_offset = real_offset;
6819 NEW_BBLOCK (cfg, ebblock);
6820 ebblock->block_num = cfg->num_bblocks++;
6821 ebblock->real_offset = real_offset;
/* Save every cfg field the recursive mono_method_to_ir call will clobber. */
6823 prev_args = cfg->args;
6824 prev_arg_types = cfg->arg_types;
6825 prev_inlined_method = cfg->inlined_method;
6826 cfg->inlined_method = cmethod;
6827 cfg->ret_var_set = FALSE;
6828 cfg->inline_depth ++;
6829 prev_real_offset = cfg->real_offset;
6830 prev_cbb_hash = cfg->cbb_hash;
6831 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6832 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6833 prev_cil_start = cfg->cil_start;
6834 prev_cbb = cfg->cbb;
6835 prev_current_method = cfg->current_method;
6836 prev_generic_context = cfg->generic_context;
6837 prev_ret_var_set = cfg->ret_var_set;
6838 prev_disable_inline = cfg->disable_inline;
/* An inlined callvirt on an instance method needs an explicit null check. */
6840 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6843 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6845 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state. */
6847 cfg->inlined_method = prev_inlined_method;
6848 cfg->real_offset = prev_real_offset;
6849 cfg->cbb_hash = prev_cbb_hash;
6850 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6851 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6852 cfg->cil_start = prev_cil_start;
6853 cfg->locals = prev_locals;
6854 cfg->args = prev_args;
6855 cfg->arg_types = prev_arg_types;
6856 cfg->current_method = prev_current_method;
6857 cfg->generic_context = prev_generic_context;
6858 cfg->ret_var_set = prev_ret_var_set;
6859 cfg->disable_inline = prev_disable_inline;
6860 cfg->inline_depth --;
/* Accept the inline if it was cheap enough or inlining was forced.
 * NOTE(review): the 60 here is a cost threshold distinct from
 * INLINE_LENGTH_LIMIT — confirm intent before changing either. */
6862 if ((costs >= 0 && costs < 60) || inline_always) {
6863 if (cfg->verbose_level > 2)
6864 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6866 cfg->stat_inlined_methods++;
6868 /* always add some code to avoid block split failures */
6869 MONO_INST_NEW (cfg, ins, OP_NOP);
6870 MONO_ADD_INS (prev_cbb, ins);
6872 prev_cbb->next_bb = sbblock;
6873 link_bblock (cfg, prev_cbb, sbblock);
6876 * Get rid of the begin and end bblocks if possible to aid local
6879 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6881 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6882 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6884 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6885 MonoBasicBlock *prev = ebblock->in_bb [0];
6886 mono_merge_basic_blocks (cfg, prev, ebblock);
6888 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6889 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6890 cfg->cbb = prev_cbb;
6894 * It's possible that the rvar is set in some prev bblock, but not in others.
6900 for (i = 0; i < ebblock->in_count; ++i) {
6901 bb = ebblock->in_bb [i];
6903 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6906 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6915 *out_cbb = cfg->cbb;
6919 * If the inlined method contains only a throw, then the ret var is not
6920 * set, so set it to a dummy value.
6923 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6925 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Defer freeing the header: IR may still point into it. */
6928 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6931 if (cfg->verbose_level > 2)
6932 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6933 cfg->exception_type = MONO_EXCEPTION_NONE;
6934 mono_loader_clear_error ();
6936 /* This gets rid of the newly added bblocks */
6937 cfg->cbb = prev_cbb;
6939 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6944 * Some of these comments may well be out-of-date.
6945 * Design decisions: we do a single pass over the IL code (and we do bblock
6946 * splitting/merging in the few cases when it's required: a back jump to an IL
6947 * address that was not already seen as bblock starting point).
6948 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6949 * Complex operations are decomposed into simpler ones right away. We need to let the
6950 * arch-specific code peek and poke inside this process somehow (except when the
6951 * optimizations can take advantage of the full semantic info of coarse opcodes).
6952 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6953 * MonoInst->opcode initially is the IL opcode or some simplification of that
6954 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6955 * opcode with value bigger than OP_LAST.
6956 * At this point the IR can be handed over to an interpreter, a dumb code generator
6957 * or to the optimizing code generator that will translate it to SSA form.
6959 * Profiling directed optimizations.
6960 * We may compile by default with few or no optimizations and instrument the code
6961 * or the user may indicate what methods to optimize the most either in a config file
6962 * or through repeated runs where the compiler applies offline the optimizations to
6963 * each method and then decides if it was worth it.
/* Inline-verification helpers used throughout mono_method_to_ir. Each one
 * bails out via UNVERIFIED / TYPE_LOAD_ERROR (defined elsewhere) when the
 * IL being translated is malformed: untyped stack entry, stack under/over
 * flow, out-of-range argument/local index, truncated opcode, or a class
 * that failed to load. */
6966 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6967 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6968 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6969 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6970 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6971 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6972 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6973 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6975 /* offset from br.s -> br like opcodes */
6976 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 * Return TRUE if the IL address IP either starts no new bblock (NULL entry
 * in the cil_offset_to_bb map) or starts the same bblock BB — i.e. IP still
 * belongs to BB for lookahead/peephole purposes.
 */
6979 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6981 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6983 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 * First pass over the IL in [start, end): create a bblock (via GET_BBLOCK)
 * at every branch target and at the instruction following each branch, so
 * the main translation loop finds bblock boundaries already in place.
 * Bblocks containing a CEE_THROW are marked out_of_line so they can be
 * laid out cold. *pos presumably reports how far the scan got — the error
 * paths are in elided lines.
 */
6987 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6989 unsigned char *ip = start;
6990 unsigned char *target;
6993 MonoBasicBlock *bblock;
6994 const MonoOpcode *opcode;
6997 cli_addr = ip - start;
6998 i = mono_opcode_value ((const guint8 **)&ip, end);
7001 opcode = &mono_opcodes [i];
/* Advance past the operand; only branch-like operands create bblocks. */
7002 switch (opcode->argument) {
7003 case MonoInlineNone:
7006 case MonoInlineString:
7007 case MonoInlineType:
7008 case MonoInlineField:
7009 case MonoInlineMethod:
7012 case MonoShortInlineR:
7019 case MonoShortInlineVar:
7020 case MonoShortInlineI:
7023 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte opcode. */
7024 target = start + cli_addr + 2 + (signed char)ip [1];
7025 GET_BBLOCK (cfg, bblock, target);
7028 GET_BBLOCK (cfg, bblock, ip);
7030 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte opcode. */
7031 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7032 GET_BBLOCK (cfg, bblock, target);
7035 GET_BBLOCK (cfg, bblock, ip);
7037 case MonoInlineSwitch: {
7038 guint32 n = read32 (ip + 1);
/* The fall-through target is right after the n-entry jump table. */
7041 cli_addr += 5 + 4 * n;
7042 target = start + cli_addr;
7043 GET_BBLOCK (cfg, bblock, target);
7045 for (j = 0; j < n; ++j) {
7046 target = start + cli_addr + (gint32)read32 (ip);
7047 GET_BBLOCK (cfg, bblock, target);
7057 g_assert_not_reached ();
7060 if (i == CEE_THROW) {
7061 unsigned char *bb_start = ip - 1;
7063 /* Find the start of the bblock containing the throw */
7065 while ((bb_start >= start) && !bblock) {
7066 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
7070 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 * Resolve TOKEN to a MonoMethod in the context of method M. For wrapper
 * methods the token indexes the wrapper's own data (then inflated with
 * CONTEXT if needed); otherwise it is a regular metadata token resolved
 * against M's image. "allow_open" distinguishes it from mini_get_method,
 * which additionally rejects open constructed types.
 */
7080 static inline MonoMethod *
7081 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7085 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7086 method = mono_method_get_wrapper_data (m, token);
7089 method = mono_class_inflate_generic_method_checked (method, context, &error);
7090 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7093 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 * Like mini_get_method_allow_open, but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (the handling
 * is in an elided line — presumably the result is NULLed; confirm in the
 * full source).
 */
7099 static inline MonoMethod *
7100 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7102 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7104 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 * Resolve TOKEN to a MonoClass in the context of METHOD: wrapper methods
 * look the class up in their wrapper data (inflating with CONTEXT), other
 * methods resolve it against their image as a typespec. The class is
 * initialized before being returned.
 */
7110 static inline MonoClass*
7111 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7116 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7117 klass = mono_method_get_wrapper_data (method, token);
7119 klass = mono_class_inflate_generic_class (klass, context);
7121 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7122 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7125 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 * Resolve TOKEN to a MonoMethodSignature (used for calli): wrapper methods
 * fetch it from their wrapper data and inflate it with CONTEXT; regular
 * methods parse it from their image's metadata.
 */
7129 static inline MonoMethodSignature*
7130 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7132 MonoMethodSignature *fsig;
7134 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7137 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7139 fsig = mono_inflate_generic_signature (fsig, context, &error);
7141 g_assert (mono_error_ok (&error));
7144 fsig = mono_metadata_parse_signature (method->klass->image, token);
7150 * Returns TRUE if the JIT should abort inlining because "callee"
7151 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 * CAS LinkDemand enforcement at JIT time. When inlining (cfg->method !=
 * caller) and the callee has declarative security, inlining must stop. For
 * a direct call, evaluates the linkdemand: an ECMA-signed demand emits a
 * call that throws a SecurityException at runtime; other failures record a
 * SECURITY_LINKDEMAND exception on the cfg (without overwriting an earlier
 * recorded exception).
 */
7154 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
7158 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
7162 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
7163 if (result == MONO_JIT_SECURITY_OK)
7166 if (result == MONO_JIT_LINKDEMAND_ECMA) {
7167 /* Generate code to throw a SecurityException before the actual call/link */
7168 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7171 NEW_ICONST (cfg, args [0], 4);
7172 NEW_METHODCONST (cfg, args [1], caller);
7173 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
7174 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
7175 /* don't hide previous results */
7176 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
7177 cfg->exception_data = result;
/*
 * throw_exception:
 *
 * Return (lazily resolving and caching in a static) the
 * SecurityManager.ThrowException(Exception) helper method used by
 * emit_throw_exception below. Not thread-safe in the classic
 * lazy-static sense, but the resolved value is idempotent.
 */
7185 throw_exception (void)
7187 static MonoMethod *method = NULL;
7190 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7191 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 * Emit a call that throws the pre-built exception object EX at runtime,
 * via the managed SecurityManager.ThrowException helper.
 */
7198 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7200 MonoMethod *thrower = throw_exception ();
7203 EMIT_NEW_PCONST (cfg, args [0], ex);
7204 mono_emit_method_call (cfg, thrower, args, NULL);
7208 * Return the original method if a wrapper is specified. We can only access
7209 * the custom attributes from the original method.
/*
 * Used by the CoreCLR security checks below: security attributes live on
 * the original managed method, not on runtime-generated wrappers.
 * Native-to-managed wrappers are returned as-is (native code is treated
 * like Critical and may call anything — see FIXME).
 */
7212 get_original_method (MonoMethod *method)
7214 if (method->wrapper_type == MONO_WRAPPER_NONE)
7217 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7218 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7221 /* in other cases we need to find the original method */
7222 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 * CoreCLR security: check that CALLER may access FIELD; when the check
 * fails (ex != NULL — the condition is in an elided line), emit code that
 * throws the returned exception at runtime instead of failing the compile.
 */
7226 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7227 MonoBasicBlock *bblock, unsigned char *ip)
7229 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7230 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7232 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 * CoreCLR security: same pattern as the field-access check above, but for
 * a CALLER -> CALLEE method call.
 */
7236 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7237 MonoBasicBlock *bblock, unsigned char *ip)
7239 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7240 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7242 emit_throw_exception (cfg, ex);
7246 * Check that the IL instructions at ip are the array initialization
7247 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 * Pattern-match the canonical RuntimeHelpers.InitializeArray sequence
 * (dup; ldtoken <field>; call InitializeArray) emitted after newarr, so
 * the runtime call can be replaced by a direct copy from the field's RVA
 * data. Only little-endian-safe element types are accepted (byte-order of
 * the blob would otherwise need swapping). For AOT the RVA itself is
 * returned so the lookup happens at load time; for dynamic images the
 * field data pointer is used directly. *out_size / *out_field_token are
 * presumably filled from elided lines — confirm in the full source.
 */
7250 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7253 * newarr[System.Int32]
7255 * ldtoken field valuetype ...
7256 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (a Field token). */
7258 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7260 guint32 token = read32 (ip + 7);
7261 guint32 field_token = read32 (ip + 2);
7262 guint32 field_index = field_token & 0xffffff;
7264 const char *data_ptr;
7266 MonoMethod *cmethod;
7267 MonoClass *dummy_class;
7268 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7272 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7276 *out_field_token = field_token;
7278 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The call must really be corlib's RuntimeHelpers.InitializeArray. */
7281 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7283 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7284 case MONO_TYPE_BOOLEAN:
7288 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7289 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7290 case MONO_TYPE_CHAR:
/* Sanity check: the computed blob size must fit in the RVA field. */
7307 if (size > mono_type_size (field->type, &dummy_align))
7310 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7311 if (!image_is_dynamic (method->klass->image)) {
7312 field_index = read32 (ip + 2) & 0xffffff;
7313 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7314 data_ptr = mono_image_rva_map (method->klass->image, rva);
7315 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7316 /* for aot code we do the lookup on load */
7317 if (aot && data_ptr)
7318 return GUINT_TO_POINTER (rva);
7320 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7322 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 * Record an InvalidProgramException on the cfg with a message that names
 * METHOD and disassembles the offending instruction at IP (or notes an
 * empty method body). The header is queued on headers_to_free rather than
 * freed immediately, since IR may still reference it.
 */
7330 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7332 char *method_fname = mono_method_full_name (method, TRUE);
7334 MonoMethodHeader *header = mono_method_get_header (method);
7336 if (header->code_size == 0)
7337 method_code = g_strdup ("method body is empty.");
7339 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7340 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7341 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7342 g_free (method_fname);
7343 g_free (method_code);
7344 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 * Fail the compilation with a pre-built managed exception object. The
 * exception pointer is registered as a GC root so the object survives
 * until the cfg is disposed.
 */
7348 set_exception_object (MonoCompile *cfg, MonoException *exception)
7350 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7351 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7352 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 * Emit the store for CIL stloc N. When the value on top of the stack is a
 * freshly emitted ICONST/I8CONST and the store is a plain register move,
 * simply retarget that instruction's dreg to the local — avoiding a
 * redundant reg-reg move; otherwise emit a normal LOCSTORE.
 */
7356 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7359 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7360 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7361 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7362 /* Optimize reg-reg moves away */
7364 * Can't optimize other opcodes, since sp[0] might point to
7365 * the last ins of a decomposed opcode.
7367 sp [0]->dreg = (cfg)->locals [n]->dreg;
7369 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
7374 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 * Peephole: a "ldloca <n>; initobj <type>" pair within the same bblock is
 * rewritten into a direct zero-initialization of the local, so the address
 * of the local is never taken. Returns the new IP past the consumed
 * sequence, or (presumably, in elided code) NULL when the pattern does not
 * match. SIZE selects the short/long ldloca encoding.
 */
7377 static inline unsigned char *
7378 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7388 local = read16 (ip + 2);
7392 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7393 /* From the INITOBJ case */
7394 token = read32 (ip + 2);
7395 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7396 CHECK_TYPELOAD (klass);
7397 type = mini_get_underlying_type (cfg, &klass->byval_arg);
7398 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 * Walk the parent chain of CLASS and return whether it derives from
 * System.Exception (loop header is in an elided line).
 */
7406 is_exception_class (MonoClass *class)
7409 if (class == mono_defaults.exception_class)
7411 class = class->parent;
7417 * is_jit_optimizer_disabled:
7419 * Determine whenever M's assembly has a DebuggableAttribute with the
7420 * IsJITOptimizerDisabled flag set.
/* The result is cached per-assembly; the write-then-barrier-then-flag
 * sequence makes the cached value visible before the 'inited' flag so
 * concurrent readers never see an uninitialized cache. */
7423 is_jit_optimizer_disabled (MonoMethod *m)
7425 MonoAssembly *ass = m->klass->image->assembly;
7426 MonoCustomAttrInfo* attrs;
7427 static MonoClass *klass;
7429 gboolean val = FALSE;
7432 if (ass->jit_optimizer_disabled_inited)
7433 return ass->jit_optimizer_disabled;
7436 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute type available: treat as not disabled. */
7439 ass->jit_optimizer_disabled = FALSE;
7440 mono_memory_barrier ();
7441 ass->jit_optimizer_disabled_inited = TRUE;
7445 attrs = mono_custom_attrs_from_assembly (ass);
7447 for (i = 0; i < attrs->num_attrs; ++i) {
7448 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7450 MonoMethodSignature *sig;
7452 if (!attr->ctor || attr->ctor->klass != klass)
7454 /* Decode the attribute. See reflection.c */
7455 p = (const char*)attr->data;
7456 g_assert (read16 (p) == 0x0001);
7459 // FIXME: Support named parameters
7460 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor form is decoded. */
7461 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7463 /* Two boolean arguments */
7467 mono_custom_attrs_free (attrs);
7470 ass->jit_optimizer_disabled = val;
7471 mono_memory_barrier ();
7472 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 * Decide whether a tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call. Starts from an arch-specific (or signature
 * equality) baseline and then vetoes the transformation whenever the
 * callee could observe pointers into the caller's soon-to-be-dead frame
 * (byref/ptr/fnptr args, valuetype 'this'), or the call shape is
 * unsupported (pinvoke, LMF-saving caller, most wrappers, non-CEE_CALL
 * opcodes). mono_debug_count () allows bisecting tail-call issues.
 */
7478 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7480 gboolean supported_tail_call;
7483 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7484 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
7486 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7489 for (i = 0; i < fsig->param_count; ++i) {
7490 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7491 /* These can point to the current method's stack */
7492 supported_tail_call = FALSE;
7494 if (fsig->hasthis && cmethod->klass->valuetype)
7495 /* this might point to the current method's stack */
7496 supported_tail_call = FALSE;
7497 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7498 supported_tail_call = FALSE;
7499 if (cfg->method->save_lmf)
7500 supported_tail_call = FALSE;
7501 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7502 supported_tail_call = FALSE;
7503 if (call_opcode != CEE_CALL)
7504 supported_tail_call = FALSE;
7506 /* Debugging support */
7508 if (supported_tail_call) {
7509 if (!mono_debug_count ())
7510 supported_tail_call = FALSE;
7514 return supported_tail_call;
7517 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
7518 * it to the thread local value based on the tls_offset field. Every other kind of access to
7519 * the field causes an assert.
/* Returns TRUE only for corlib's ThreadLocal`1.tlsdata field; the early
 * returns on strcmp mismatch are in elided lines. */
7522 is_magic_tls_access (MonoClassField *field)
7524 if (strcmp (field->name, "tlsdata"))
7526 if (strcmp (field->parent->name, "ThreadLocal`1"))
7528 return field->parent->image == mono_defaults.corlib;
7531 /* emits the code needed to access a managed tls var (like ThreadStatic)
7532 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7533 * pointer for the current thread.
7534 * Returns the MonoInst* representing the address of the tls var.
/* The encoded offset packs a 1-based static_data index in the top byte and
 * a byte offset in the low 24 bits; this emits the unpacking inline. */
7537 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7540 int static_data_reg, array_reg, dreg;
7541 int offset2_reg, idx_reg;
7542 // inlined access to the tls data
7543 // idx = (offset >> 24) - 1;
7544 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
7545 static_data_reg = alloc_ireg (cfg);
7546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
7547 idx_reg = alloc_ireg (cfg);
7548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by pointer size (shift by 3 on 64-bit, 2 on 32-bit). */
7550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7551 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7552 array_reg = alloc_ireg (cfg);
7553 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
7554 offset2_reg = alloc_ireg (cfg);
7555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7556 dreg = alloc_ireg (cfg);
7557 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7562 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7563 * this address is cached per-method in cached_tls_addr.
/* On first use, load tls_offset from the ThreadLocal<T> instance, obtain
 * the current MonoInternalThread (intrinsic when available, otherwise a
 * call to CurrentInternalThread_internal), compute the address via
 * emit_managed_static_data_access, and cache it in a temp local;
 * subsequent uses reload the cached address. */
7566 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7568 MonoInst *load, *addr, *temp, *store, *thread_ins;
7569 MonoClassField *offset_field;
7571 if (*cached_tls_addr) {
7572 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7575 thread_ins = mono_get_thread_intrinsic (cfg);
7576 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
7578 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7580 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this target: call the icall instead. */
7582 MonoMethod *thread_method;
7583 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7584 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7586 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7587 addr->klass = mono_class_from_mono_type (tls_field->type);
7588 addr->type = STACK_MP;
7589 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7590 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7592 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
7599 * Handle calls made to ctors from NEWOBJ opcodes.
7601 * REF_BBLOCK will point to the current bblock after the call.
/*
 * Emission strategy, in order of preference: ctor intrinsic (OPT_INTRINS),
 * inlining (OPT_INLINE, non-shared, non-Exception subclasses), gsharedvt
 * out-trampoline calli, context-dependent indirect call via the rgctx, or
 * a plain direct call. For generic-shared valuetype ctors a vtable/mrgctx
 * hidden argument is prepared first.
 */
7604 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7605 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7607 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7608 MonoBasicBlock *bblock = *ref_bblock;
7610 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7611 mono_method_is_generic_sharable (cmethod, TRUE)) {
/* Method-inst generics pass an MRGCTX; otherwise pass the vtable. */
7612 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7613 mono_class_vtable (cfg->domain, cmethod->klass);
7614 CHECK_TYPELOAD (cmethod->klass);
7616 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7617 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7620 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7621 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7623 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7625 CHECK_TYPELOAD (cmethod->klass);
7626 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7631 /* Avoid virtual calls to ctors if possible */
7632 if (mono_class_is_marshalbyref (cmethod->klass))
7633 callvirt_this_arg = sp [0];
7635 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7636 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7637 CHECK_CFG_EXCEPTION;
7638 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7639 mono_method_check_inlining (cfg, cmethod) &&
7640 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
/* Account 5 (the call instruction's IL length) against the inline cost. */
7643 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7644 cfg->real_offset += 5;
7646 *inline_costs += costs - 5;
7647 *ref_bblock = bblock;
7649 INLINE_FAILURE ("inline failure");
7650 // FIXME-VT: Clean this up
7651 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7652 GSHAREDVT_FAILURE(*ip);
7653 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7655 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7658 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7659 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7660 } else if (context_used &&
7661 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7662 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7663 MonoInst *cmethod_addr;
7665 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7667 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7668 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7670 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: direct (non-inlinable) ctor call. */
7672 INLINE_FAILURE ("ctor call");
7673 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7674 callvirt_this_arg, NULL, vtable_arg);
7681 * mono_method_to_ir:
 7683 * Translate the .NET IL into linear IR.
7686 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7687 MonoInst *return_var, MonoInst **inline_args,
7688 guint inline_offset, gboolean is_virtual_call)
7691 MonoInst *ins, **sp, **stack_start;
7692 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7693 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7694 MonoMethod *cmethod, *method_definition;
7695 MonoInst **arg_array;
7696 MonoMethodHeader *header;
7698 guint32 token, ins_flag;
7700 MonoClass *constrained_class = NULL;
7701 unsigned char *ip, *end, *target, *err_pos;
7702 MonoMethodSignature *sig;
7703 MonoGenericContext *generic_context = NULL;
7704 MonoGenericContainer *generic_container = NULL;
7705 MonoType **param_types;
7706 int i, n, start_new_bblock, dreg;
7707 int num_calls = 0, inline_costs = 0;
7708 int breakpoint_id = 0;
7710 MonoBoolean security, pinvoke;
7711 MonoSecurityManager* secman = NULL;
7712 MonoDeclSecurityActions actions;
7713 GSList *class_inits = NULL;
7714 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7716 gboolean init_locals, seq_points, skip_dead_blocks;
7717 gboolean sym_seq_points = FALSE;
7718 MonoInst *cached_tls_addr = NULL;
7719 MonoDebugMethodInfo *minfo;
7720 MonoBitSet *seq_point_locs = NULL;
7721 MonoBitSet *seq_point_set_locs = NULL;
7723 cfg->disable_inline = is_jit_optimizer_disabled (method);
7725 /* serialization and xdomain stuff may need access to private fields and methods */
7726 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7727 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7728 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7729 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7730 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7731 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7733 dont_verify |= mono_security_smcs_hack_enabled ();
7735 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7736 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7737 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7738 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7739 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7741 image = method->klass->image;
7742 header = mono_method_get_header (method);
7744 MonoLoaderError *error;
7746 if ((error = mono_loader_get_last_error ())) {
7747 mono_cfg_set_exception (cfg, error->exception_type);
7749 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7750 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7752 goto exception_exit;
7754 generic_container = mono_method_get_generic_container (method);
7755 sig = mono_method_signature (method);
7756 num_args = sig->hasthis + sig->param_count;
7757 ip = (unsigned char*)header->code;
7758 cfg->cil_start = ip;
7759 end = ip + header->code_size;
7760 cfg->stat_cil_code_size += header->code_size;
7762 seq_points = cfg->gen_seq_points && cfg->method == method;
7763 #ifdef PLATFORM_ANDROID
7764 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7767 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7768 /* We could hit a seq point before attaching to the JIT (#8338) */
7772 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7773 minfo = mono_debug_lookup_method (method);
7775 int i, n_il_offsets;
7779 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7780 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7781 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7782 sym_seq_points = TRUE;
7783 for (i = 0; i < n_il_offsets; ++i) {
7784 if (il_offsets [i] < header->code_size)
7785 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7787 g_free (il_offsets);
7788 g_free (line_numbers);
7789 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7790 /* Methods without line number info like auto-generated property accessors */
7791 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7792 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7793 sym_seq_points = TRUE;
7798 * Methods without init_locals set could cause asserts in various passes
7799 * (#497220). To work around this, we emit dummy initialization opcodes
7800 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7801 * on some platforms.
7803 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7804 init_locals = header->init_locals;
7808 method_definition = method;
7809 while (method_definition->is_inflated) {
7810 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7811 method_definition = imethod->declaring;
7814 /* SkipVerification is not allowed if core-clr is enabled */
7815 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7817 dont_verify_stloc = TRUE;
7820 if (sig->is_inflated)
7821 generic_context = mono_method_get_context (method);
7822 else if (generic_container)
7823 generic_context = &generic_container->context;
7824 cfg->generic_context = generic_context;
7826 if (!cfg->generic_sharing_context)
7827 g_assert (!sig->has_type_parameters);
7829 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7830 g_assert (method->is_inflated);
7831 g_assert (mono_method_get_context (method)->method_inst);
7833 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7834 g_assert (sig->generic_param_count);
7836 if (cfg->method == method) {
7837 cfg->real_offset = 0;
7839 cfg->real_offset = inline_offset;
7842 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7843 cfg->cil_offset_to_bb_len = header->code_size;
7845 cfg->current_method = method;
7847 if (cfg->verbose_level > 2)
7848 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7850 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7852 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7853 for (n = 0; n < sig->param_count; ++n)
7854 param_types [n + sig->hasthis] = sig->params [n];
7855 cfg->arg_types = param_types;
7857 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7858 if (cfg->method == method) {
7860 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7861 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7864 NEW_BBLOCK (cfg, start_bblock);
7865 cfg->bb_entry = start_bblock;
7866 start_bblock->cil_code = NULL;
7867 start_bblock->cil_length = 0;
7868 #if defined(__native_client_codegen__)
7869 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7870 ins->dreg = alloc_dreg (cfg, STACK_I4);
7871 MONO_ADD_INS (start_bblock, ins);
7875 NEW_BBLOCK (cfg, end_bblock);
7876 cfg->bb_exit = end_bblock;
7877 end_bblock->cil_code = NULL;
7878 end_bblock->cil_length = 0;
7879 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7880 g_assert (cfg->num_bblocks == 2);
7882 arg_array = cfg->args;
7884 if (header->num_clauses) {
7885 cfg->spvars = g_hash_table_new (NULL, NULL);
7886 cfg->exvars = g_hash_table_new (NULL, NULL);
7888 /* handle exception clauses */
7889 for (i = 0; i < header->num_clauses; ++i) {
7890 MonoBasicBlock *try_bb;
7891 MonoExceptionClause *clause = &header->clauses [i];
7892 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7893 try_bb->real_offset = clause->try_offset;
7894 try_bb->try_start = TRUE;
7895 try_bb->region = ((i + 1) << 8) | clause->flags;
7896 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7897 tblock->real_offset = clause->handler_offset;
7898 tblock->flags |= BB_EXCEPTION_HANDLER;
7901 * Linking the try block with the EH block hinders inlining as we won't be able to
7902 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7904 if (COMPILE_LLVM (cfg))
7905 link_bblock (cfg, try_bb, tblock);
7907 if (*(ip + clause->handler_offset) == CEE_POP)
7908 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7910 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7911 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7912 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7913 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7914 MONO_ADD_INS (tblock, ins);
7916 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7917 /* finally clauses already have a seq point */
7918 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7919 MONO_ADD_INS (tblock, ins);
7922 /* todo: is a fault block unsafe to optimize? */
7923 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7924 tblock->flags |= BB_EXCEPTION_UNSAFE;
7928 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7930 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7932 /* catch and filter blocks get the exception object on the stack */
7933 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7934 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7935 MonoInst *dummy_use;
7937 /* mostly like handle_stack_args (), but just sets the input args */
7938 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7939 tblock->in_scount = 1;
7940 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7941 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7944 * Add a dummy use for the exvar so its liveness info will be
7948 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7950 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7951 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7952 tblock->flags |= BB_EXCEPTION_HANDLER;
7953 tblock->real_offset = clause->data.filter_offset;
7954 tblock->in_scount = 1;
7955 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7956 /* The filter block shares the exvar with the handler block */
7957 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7958 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7959 MONO_ADD_INS (tblock, ins);
7963 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7964 clause->data.catch_class &&
7965 cfg->generic_sharing_context &&
7966 mono_class_check_context_used (clause->data.catch_class)) {
7968 * In shared generic code with catch
7969 * clauses containing type variables
7970 * the exception handling code has to
7971 * be able to get to the rgctx.
7972 * Therefore we have to make sure that
7973 * the vtable/mrgctx argument (for
7974 * static or generic methods) or the
7975 * "this" argument (for non-static
7976 * methods) are live.
7978 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7979 mini_method_get_context (method)->method_inst ||
7980 method->klass->valuetype) {
7981 mono_get_vtable_var (cfg);
7983 MonoInst *dummy_use;
7985 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7990 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7991 cfg->cbb = start_bblock;
7992 cfg->args = arg_array;
7993 mono_save_args (cfg, sig, inline_args);
7996 /* FIRST CODE BLOCK */
7997 NEW_BBLOCK (cfg, bblock);
7998 bblock->cil_code = ip;
8002 ADD_BBLOCK (cfg, bblock);
8004 if (cfg->method == method) {
8005 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8006 if (breakpoint_id) {
8007 MONO_INST_NEW (cfg, ins, OP_BREAK);
8008 MONO_ADD_INS (bblock, ins);
8012 if (mono_security_cas_enabled ())
8013 secman = mono_security_manager_get_methods ();
8015 security = (secman && mono_security_method_has_declsec (method));
8016 /* at this point having security doesn't mean we have any code to generate */
8017 if (security && (cfg->method == method)) {
8018 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
8019 * And we do not want to enter the next section (with allocation) if we
8020 * have nothing to generate */
8021 security = mono_declsec_get_demands (method, &actions);
8024 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
8025 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
8027 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8028 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8029 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
 8031 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
8032 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8036 mono_custom_attrs_free (custom);
8039 custom = mono_custom_attrs_from_class (wrapped->klass);
8040 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8044 mono_custom_attrs_free (custom);
8047 /* not a P/Invoke after all */
8052 /* we use a separate basic block for the initialization code */
8053 NEW_BBLOCK (cfg, init_localsbb);
8054 cfg->bb_init = init_localsbb;
8055 init_localsbb->real_offset = cfg->real_offset;
8056 start_bblock->next_bb = init_localsbb;
8057 init_localsbb->next_bb = bblock;
8058 link_bblock (cfg, start_bblock, init_localsbb);
8059 link_bblock (cfg, init_localsbb, bblock);
8061 cfg->cbb = init_localsbb;
8063 if (cfg->gsharedvt && cfg->method == method) {
8064 MonoGSharedVtMethodInfo *info;
8065 MonoInst *var, *locals_var;
8068 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8069 info->method = cfg->method;
8070 info->count_entries = 16;
8071 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8072 cfg->gsharedvt_info = info;
8074 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8075 /* prevent it from being register allocated */
8076 //var->flags |= MONO_INST_VOLATILE;
8077 cfg->gsharedvt_info_var = var;
8079 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8080 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8082 /* Allocate locals */
8083 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8084 /* prevent it from being register allocated */
8085 //locals_var->flags |= MONO_INST_VOLATILE;
8086 cfg->gsharedvt_locals_var = locals_var;
8088 dreg = alloc_ireg (cfg);
8089 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8091 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8092 ins->dreg = locals_var->dreg;
8094 MONO_ADD_INS (cfg->cbb, ins);
8095 cfg->gsharedvt_locals_var_ins = ins;
8097 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8100 ins->flags |= MONO_INST_INIT;
8104 /* at this point we know, if security is TRUE, that some code needs to be generated */
8105 if (security && (cfg->method == method)) {
8108 cfg->stat_cas_demand_generation++;
8110 if (actions.demand.blob) {
8111 /* Add code for SecurityAction.Demand */
8112 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
8113 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
8114 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8115 mono_emit_method_call (cfg, secman->demand, args, NULL);
8117 if (actions.noncasdemand.blob) {
8118 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
8119 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
8120 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
8121 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
8122 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8123 mono_emit_method_call (cfg, secman->demand, args, NULL);
8125 if (actions.demandchoice.blob) {
8126 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
8127 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
8128 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
8129 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
8130 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
8134 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
8136 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
8139 if (mono_security_core_clr_enabled ()) {
8140 /* check if this is native code, e.g. an icall or a p/invoke */
8141 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8142 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8144 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8145 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
 8147 /* if this is a native call then it can only be JITted from platform code */
8148 if ((icall || pinvk) && method->klass && method->klass->image) {
8149 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8150 MonoException *ex = icall ? mono_get_exception_security () :
8151 mono_get_exception_method_access ();
8152 emit_throw_exception (cfg, ex);
8159 CHECK_CFG_EXCEPTION;
8161 if (header->code_size == 0)
8164 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8169 if (cfg->method == method)
8170 mono_debug_init_method (cfg, bblock, breakpoint_id);
8172 for (n = 0; n < header->num_locals; ++n) {
8173 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8178 /* We force the vtable variable here for all shared methods
8179 for the possibility that they might show up in a stack
8180 trace where their exact instantiation is needed. */
8181 if (cfg->generic_sharing_context && method == cfg->method) {
8182 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8183 mini_method_get_context (method)->method_inst ||
8184 method->klass->valuetype) {
8185 mono_get_vtable_var (cfg);
8187 /* FIXME: Is there a better way to do this?
8188 We need the variable live for the duration
8189 of the whole method. */
8190 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8194 /* add a check for this != NULL to inlined methods */
8195 if (is_virtual_call) {
8198 NEW_ARGLOAD (cfg, arg_ins, 0);
8199 MONO_ADD_INS (cfg->cbb, arg_ins);
8200 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8203 skip_dead_blocks = !dont_verify;
8204 if (skip_dead_blocks) {
8205 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8210 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8211 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8214 start_new_bblock = 0;
8217 if (cfg->method == method)
8218 cfg->real_offset = ip - header->code;
8220 cfg->real_offset = inline_offset;
8225 if (start_new_bblock) {
8226 bblock->cil_length = ip - bblock->cil_code;
8227 if (start_new_bblock == 2) {
8228 g_assert (ip == tblock->cil_code);
8230 GET_BBLOCK (cfg, tblock, ip);
8232 bblock->next_bb = tblock;
8235 start_new_bblock = 0;
8236 for (i = 0; i < bblock->in_scount; ++i) {
8237 if (cfg->verbose_level > 3)
8238 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8239 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8243 g_slist_free (class_inits);
8246 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8247 link_bblock (cfg, bblock, tblock);
8248 if (sp != stack_start) {
8249 handle_stack_args (cfg, stack_start, sp - stack_start);
8251 CHECK_UNVERIFIABLE (cfg);
8253 bblock->next_bb = tblock;
8256 for (i = 0; i < bblock->in_scount; ++i) {
8257 if (cfg->verbose_level > 3)
8258 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8259 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8262 g_slist_free (class_inits);
8267 if (skip_dead_blocks) {
8268 int ip_offset = ip - header->code;
8270 if (ip_offset == bb->end)
8274 int op_size = mono_opcode_size (ip, end);
8275 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8277 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8279 if (ip_offset + op_size == bb->end) {
8280 MONO_INST_NEW (cfg, ins, OP_NOP);
8281 MONO_ADD_INS (bblock, ins);
8282 start_new_bblock = 1;
8290 * Sequence points are points where the debugger can place a breakpoint.
8291 * Currently, we generate these automatically at points where the IL
8294 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8296 * Make methods interruptable at the beginning, and at the targets of
8297 * backward branches.
8298 * Also, do this at the start of every bblock in methods with clauses too,
 8299 * to be able to handle instructions with imprecise control flow like
8301 * Backward branches are handled at the end of method-to-ir ().
8303 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8305 /* Avoid sequence points on empty IL like .volatile */
8306 // FIXME: Enable this
8307 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8308 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8309 if (sp != stack_start)
8310 ins->flags |= MONO_INST_NONEMPTY_STACK;
8311 MONO_ADD_INS (cfg->cbb, ins);
8314 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8317 bblock->real_offset = cfg->real_offset;
8319 if ((cfg->method == method) && cfg->coverage_info) {
8320 guint32 cil_offset = ip - header->code;
8321 cfg->coverage_info->data [cil_offset].cil_code = ip;
8323 /* TODO: Use an increment here */
8324 #if defined(TARGET_X86)
8325 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8326 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8328 MONO_ADD_INS (cfg->cbb, ins);
8330 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8331 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8335 if (cfg->verbose_level > 3)
8336 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8340 if (seq_points && !sym_seq_points && sp != stack_start) {
8342 * The C# compiler uses these nops to notify the JIT that it should
8343 * insert seq points.
8345 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8346 MONO_ADD_INS (cfg->cbb, ins);
8348 if (cfg->keep_cil_nops)
8349 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8351 MONO_INST_NEW (cfg, ins, OP_NOP);
8353 MONO_ADD_INS (bblock, ins);
8356 if (should_insert_brekpoint (cfg->method)) {
8357 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8359 MONO_INST_NEW (cfg, ins, OP_NOP);
8362 MONO_ADD_INS (bblock, ins);
8368 CHECK_STACK_OVF (1);
8369 n = (*ip)-CEE_LDARG_0;
8371 EMIT_NEW_ARGLOAD (cfg, ins, n);
8379 CHECK_STACK_OVF (1);
8380 n = (*ip)-CEE_LDLOC_0;
8382 EMIT_NEW_LOCLOAD (cfg, ins, n);
8391 n = (*ip)-CEE_STLOC_0;
8394 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8396 emit_stloc_ir (cfg, sp, header, n);
8403 CHECK_STACK_OVF (1);
8406 EMIT_NEW_ARGLOAD (cfg, ins, n);
8412 CHECK_STACK_OVF (1);
8415 NEW_ARGLOADA (cfg, ins, n);
8416 MONO_ADD_INS (cfg->cbb, ins);
8426 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8428 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8433 CHECK_STACK_OVF (1);
8436 EMIT_NEW_LOCLOAD (cfg, ins, n);
8440 case CEE_LDLOCA_S: {
8441 unsigned char *tmp_ip;
8443 CHECK_STACK_OVF (1);
8444 CHECK_LOCAL (ip [1]);
8446 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8452 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8461 CHECK_LOCAL (ip [1]);
8462 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8464 emit_stloc_ir (cfg, sp, header, ip [1]);
8469 CHECK_STACK_OVF (1);
8470 EMIT_NEW_PCONST (cfg, ins, NULL);
8471 ins->type = STACK_OBJ;
8476 CHECK_STACK_OVF (1);
8477 EMIT_NEW_ICONST (cfg, ins, -1);
8490 CHECK_STACK_OVF (1);
8491 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8497 CHECK_STACK_OVF (1);
8499 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8505 CHECK_STACK_OVF (1);
8506 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8512 CHECK_STACK_OVF (1);
8513 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8514 ins->type = STACK_I8;
8515 ins->dreg = alloc_dreg (cfg, STACK_I8);
8517 ins->inst_l = (gint64)read64 (ip);
8518 MONO_ADD_INS (bblock, ins);
8524 gboolean use_aotconst = FALSE;
8526 #ifdef TARGET_POWERPC
8527 /* FIXME: Clean this up */
8528 if (cfg->compile_aot)
8529 use_aotconst = TRUE;
8532 /* FIXME: we should really allocate this only late in the compilation process */
8533 f = mono_domain_alloc (cfg->domain, sizeof (float));
8535 CHECK_STACK_OVF (1);
8541 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8543 dreg = alloc_freg (cfg);
8544 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8545 ins->type = cfg->r4_stack_type;
8547 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8548 ins->type = cfg->r4_stack_type;
8549 ins->dreg = alloc_dreg (cfg, STACK_R8);
8551 MONO_ADD_INS (bblock, ins);
8561 gboolean use_aotconst = FALSE;
8563 #ifdef TARGET_POWERPC
8564 /* FIXME: Clean this up */
8565 if (cfg->compile_aot)
8566 use_aotconst = TRUE;
8569 /* FIXME: we should really allocate this only late in the compilation process */
8570 d = mono_domain_alloc (cfg->domain, sizeof (double));
8572 CHECK_STACK_OVF (1);
8578 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8580 dreg = alloc_freg (cfg);
8581 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8582 ins->type = STACK_R8;
8584 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8585 ins->type = STACK_R8;
8586 ins->dreg = alloc_dreg (cfg, STACK_R8);
8588 MONO_ADD_INS (bblock, ins);
8597 MonoInst *temp, *store;
8599 CHECK_STACK_OVF (1);
8603 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8604 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8606 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8609 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8622 if (sp [0]->type == STACK_R8)
8623 /* we need to pop the value from the x86 FP stack */
8624 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8630 INLINE_FAILURE ("jmp");
8631 GSHAREDVT_FAILURE (*ip);
8634 if (stack_start != sp)
8636 token = read32 (ip + 1);
8637 /* FIXME: check the signature matches */
8638 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8640 if (!cmethod || mono_loader_get_last_error ())
8643 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8644 GENERIC_SHARING_FAILURE (CEE_JMP);
8646 if (mono_security_cas_enabled ())
8647 CHECK_CFG_EXCEPTION;
8649 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8651 if (ARCH_HAVE_OP_TAIL_CALL) {
8652 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8655 /* Handle tail calls similarly to calls */
8656 n = fsig->param_count + fsig->hasthis;
8660 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8661 call->method = cmethod;
8662 call->tail_call = TRUE;
8663 call->signature = mono_method_signature (cmethod);
8664 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8665 call->inst.inst_p0 = cmethod;
8666 for (i = 0; i < n; ++i)
8667 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8669 mono_arch_emit_call (cfg, call);
8670 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8671 MONO_ADD_INS (bblock, (MonoInst*)call);
8673 for (i = 0; i < num_args; ++i)
8674 /* Prevent arguments from being optimized away */
8675 arg_array [i]->flags |= MONO_INST_VOLATILE;
8677 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8678 ins = (MonoInst*)call;
8679 ins->inst_p0 = cmethod;
8680 MONO_ADD_INS (bblock, ins);
8684 start_new_bblock = 1;
8689 case CEE_CALLVIRT: {
8690 MonoInst *addr = NULL;
8691 MonoMethodSignature *fsig = NULL;
8693 int virtual = *ip == CEE_CALLVIRT;
8694 int calli = *ip == CEE_CALLI;
8695 gboolean pass_imt_from_rgctx = FALSE;
8696 MonoInst *imt_arg = NULL;
8697 MonoInst *keep_this_alive = NULL;
8698 gboolean pass_vtable = FALSE;
8699 gboolean pass_mrgctx = FALSE;
8700 MonoInst *vtable_arg = NULL;
8701 gboolean check_this = FALSE;
8702 gboolean supported_tail_call = FALSE;
8703 gboolean tail_call = FALSE;
8704 gboolean need_seq_point = FALSE;
8705 guint32 call_opcode = *ip;
8706 gboolean emit_widen = TRUE;
8707 gboolean push_res = TRUE;
8708 gboolean skip_ret = FALSE;
8709 gboolean delegate_invoke = FALSE;
8712 token = read32 (ip + 1);
8717 //GSHAREDVT_FAILURE (*ip);
8722 fsig = mini_get_signature (method, token, generic_context);
8723 n = fsig->param_count + fsig->hasthis;
8725 if (method->dynamic && fsig->pinvoke) {
8729 * This is a call through a function pointer using a pinvoke
8730 * signature. Have to create a wrapper and call that instead.
8731 * FIXME: This is very slow, need to create a wrapper at JIT time
8732 * instead based on the signature.
8734 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8735 EMIT_NEW_PCONST (cfg, args [1], fsig);
8737 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8740 MonoMethod *cil_method;
8742 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8743 cil_method = cmethod;
8745 if (constrained_class) {
8746 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8747 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8748 g_assert (!cmethod->klass->valuetype);
8749 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg)) {
8750 /* FIXME: gshared type constrained to a primitive type */
8751 GENERIC_SHARING_FAILURE (CEE_CALL);
8756 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8757 if (cfg->verbose_level > 2)
8758 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8759 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8760 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8761 cfg->generic_sharing_context)) {
8762 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8766 if (cfg->verbose_level > 2)
8767 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8769 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8771 * This is needed since get_method_constrained can't find
8772 * the method in klass representing a type var.
8773 * The type var is guaranteed to be a reference type in this
8776 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8777 g_assert (!cmethod->klass->valuetype);
8779 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8785 if (!cmethod || mono_loader_get_last_error ())
8787 if (!dont_verify && !cfg->skip_visibility) {
8788 MonoMethod *target_method = cil_method;
8789 if (method->is_inflated) {
8790 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8792 if (!mono_method_can_access_method (method_definition, target_method) &&
8793 !mono_method_can_access_method (method, cil_method))
8794 METHOD_ACCESS_FAILURE (method, cil_method);
8797 if (mono_security_core_clr_enabled ())
8798 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8800 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8801 /* MS.NET seems to silently convert this to a callvirt */
8806 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8807 * converts to a callvirt.
8809 * tests/bug-515884.il is an example of this behavior
8811 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8812 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8813 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8817 if (!cmethod->klass->inited)
8818 if (!mono_class_init (cmethod->klass))
8819 TYPE_LOAD_ERROR (cmethod->klass);
8821 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8822 mini_class_is_system_array (cmethod->klass)) {
8823 array_rank = cmethod->klass->rank;
8824 fsig = mono_method_signature (cmethod);
8826 fsig = mono_method_signature (cmethod);
8831 if (fsig->pinvoke) {
8832 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8833 check_for_pending_exc, cfg->compile_aot);
8834 fsig = mono_method_signature (wrapper);
8835 } else if (constrained_class) {
8836 fsig = mono_method_signature (cmethod);
8838 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8843 mono_save_token_info (cfg, image, token, cil_method);
8845 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8846 need_seq_point = TRUE;
8848 n = fsig->param_count + fsig->hasthis;
8850 /* Don't support calls made using type arguments for now */
8852 if (cfg->gsharedvt) {
8853 if (mini_is_gsharedvt_signature (cfg, fsig))
8854 GSHAREDVT_FAILURE (*ip);
8858 if (mono_security_cas_enabled ()) {
8859 if (check_linkdemand (cfg, method, cmethod))
8860 INLINE_FAILURE ("linkdemand");
8861 CHECK_CFG_EXCEPTION;
8864 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8865 g_assert_not_reached ();
8868 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8871 if (!cfg->generic_sharing_context && cmethod)
8872 g_assert (!mono_method_check_context_used (cmethod));
8876 //g_assert (!virtual || fsig->hasthis);
8880 if (constrained_class) {
8881 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
8882 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8883 /* The 'Own method' case below */
8884 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8885 /* 'The type parameter is instantiated as a reference type' case below. */
8887 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
8888 CHECK_CFG_EXCEPTION;
8895 * We have the `constrained.' prefix opcode.
8897 if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8899 * The type parameter is instantiated as a valuetype,
8900 * but that type doesn't override the method we're
8901 * calling, so we need to box `this'.
8903 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8904 ins->klass = constrained_class;
8905 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8906 CHECK_CFG_EXCEPTION;
8907 } else if (!constrained_class->valuetype) {
8908 int dreg = alloc_ireg_ref (cfg);
8911 * The type parameter is instantiated as a reference
8912 * type. We have a managed pointer on the stack, so
8913 * we need to dereference it here.
8915 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8916 ins->type = STACK_OBJ;
8919 if (cmethod->klass->valuetype) {
8922 /* Interface method */
8925 mono_class_setup_vtable (constrained_class);
8926 CHECK_TYPELOAD (constrained_class);
8927 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
8929 TYPE_LOAD_ERROR (constrained_class);
8930 slot = mono_method_get_vtable_slot (cmethod);
8932 TYPE_LOAD_ERROR (cmethod->klass);
8933 cmethod = constrained_class->vtable [ioffset + slot];
8935 if (cmethod->klass == mono_defaults.enum_class) {
8936 /* Enum implements some interfaces, so treat this as the first case */
8937 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
8938 ins->klass = constrained_class;
8939 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
8940 CHECK_CFG_EXCEPTION;
8945 constrained_class = NULL;
8948 if (!calli && check_call_signature (cfg, fsig, sp))
8951 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8952 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8953 delegate_invoke = TRUE;
8956 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8958 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8959 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8967 * If the callee is a shared method, then its static cctor
8968 * might not get called after the call was patched.
8970 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8971 emit_generic_class_init (cfg, cmethod->klass);
8972 CHECK_TYPELOAD (cmethod->klass);
8976 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8978 if (cfg->generic_sharing_context && cmethod) {
8979 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8981 context_used = mini_method_check_context_used (cfg, cmethod);
8983 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8984 /* Generic method interface
8985 calls are resolved via a
8986 helper function and don't
8988 if (!cmethod_context || !cmethod_context->method_inst)
8989 pass_imt_from_rgctx = TRUE;
8993 * If a shared method calls another
8994 * shared method then the caller must
8995 * have a generic sharing context
8996 * because the magic trampoline
8997 * requires it. FIXME: We shouldn't
8998 * have to force the vtable/mrgctx
8999 * variable here. Instead there
9000 * should be a flag in the cfg to
9001 * request a generic sharing context.
9004 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9005 mono_get_vtable_var (cfg);
9010 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9012 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9014 CHECK_TYPELOAD (cmethod->klass);
9015 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9020 g_assert (!vtable_arg);
9022 if (!cfg->compile_aot) {
9024 * emit_get_rgctx_method () calls mono_class_vtable () so check
9025 * for type load errors before.
9027 mono_class_setup_vtable (cmethod->klass);
9028 CHECK_TYPELOAD (cmethod->klass);
9031 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9033 /* !marshalbyref is needed to properly handle generic methods + remoting */
9034 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9035 MONO_METHOD_IS_FINAL (cmethod)) &&
9036 !mono_class_is_marshalbyref (cmethod->klass)) {
9043 if (pass_imt_from_rgctx) {
9044 g_assert (!pass_vtable);
9047 imt_arg = emit_get_rgctx_method (cfg, context_used,
9048 cmethod, MONO_RGCTX_INFO_METHOD);
9052 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9054 /* Calling virtual generic methods */
9055 if (cmethod && virtual &&
9056 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9057 !(MONO_METHOD_IS_FINAL (cmethod) &&
9058 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9059 fsig->generic_param_count &&
9060 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9061 MonoInst *this_temp, *this_arg_temp, *store;
9062 MonoInst *iargs [4];
9063 gboolean use_imt = FALSE;
9065 g_assert (fsig->is_inflated);
9067 /* Prevent inlining of methods that contain indirect calls */
9068 INLINE_FAILURE ("virtual generic call");
9070 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9071 GSHAREDVT_FAILURE (*ip);
9073 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9074 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
9079 g_assert (!imt_arg);
9081 g_assert (cmethod->is_inflated);
9082 imt_arg = emit_get_rgctx_method (cfg, context_used,
9083 cmethod, MONO_RGCTX_INFO_METHOD);
9084 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9086 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9087 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9088 MONO_ADD_INS (bblock, store);
9090 /* FIXME: This should be a managed pointer */
9091 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9093 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9094 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9095 cmethod, MONO_RGCTX_INFO_METHOD);
9096 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9097 addr = mono_emit_jit_icall (cfg,
9098 mono_helper_compile_generic_method, iargs);
9100 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9102 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9109 * Implement a workaround for the inherent races involved in locking:
9115 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9116 * try block, the Exit () won't be executed, see:
9117 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9118 * To work around this, we extend such try blocks to include the last x bytes
9119 * of the Monitor.Enter () call.
9121 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9122 MonoBasicBlock *tbb;
9124 GET_BBLOCK (cfg, tbb, ip + 5);
9126 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9127 * from Monitor.Enter like ArgumentNullException.
9129 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9130 /* Mark this bblock as needing to be extended */
9131 tbb->extend_try_block = TRUE;
9135 /* Conversion to a JIT intrinsic */
9136 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9138 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9139 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9146 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
9147 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9148 mono_method_check_inlining (cfg, cmethod)) {
9150 gboolean always = FALSE;
9152 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9153 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9154 /* Prevent inlining of methods that call wrappers */
9155 INLINE_FAILURE ("wrapper call");
9156 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9160 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9162 cfg->real_offset += 5;
9164 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9165 /* *sp is already set by inline_method */
9170 inline_costs += costs;
9176 /* Tail recursion elimination */
9177 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9178 gboolean has_vtargs = FALSE;
9181 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9182 INLINE_FAILURE ("tail call");
9184 /* keep it simple */
9185 for (i = fsig->param_count - 1; i >= 0; i--) {
9186 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9191 for (i = 0; i < n; ++i)
9192 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9193 MONO_INST_NEW (cfg, ins, OP_BR);
9194 MONO_ADD_INS (bblock, ins);
9195 tblock = start_bblock->out_bb [0];
9196 link_bblock (cfg, bblock, tblock);
9197 ins->inst_target_bb = tblock;
9198 start_new_bblock = 1;
9200 /* skip the CEE_RET, too */
9201 if (ip_in_bb (cfg, bblock, ip + 5))
9208 inline_costs += 10 * num_calls++;
9211 * Making generic calls out of gsharedvt methods.
9212 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9213 * patching gshared method addresses into a gsharedvt method.
9215 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9216 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9217 MonoRgctxInfoType info_type;
9220 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9221 //GSHAREDVT_FAILURE (*ip);
9222 // disable for possible remoting calls
9223 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9224 GSHAREDVT_FAILURE (*ip);
9225 if (fsig->generic_param_count) {
9226 /* virtual generic call */
9227 g_assert (mono_use_imt);
9228 g_assert (!imt_arg);
9229 /* Same as the virtual generic case above */
9230 imt_arg = emit_get_rgctx_method (cfg, context_used,
9231 cmethod, MONO_RGCTX_INFO_METHOD);
9232 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9234 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9235 /* This can happen when we call a fully instantiated iface method */
9236 imt_arg = emit_get_rgctx_method (cfg, context_used,
9237 cmethod, MONO_RGCTX_INFO_METHOD);
9242 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9243 keep_this_alive = sp [0];
9245 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9246 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9248 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9249 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9251 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9253 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9255 * We pass the address to the gsharedvt trampoline in the rgctx reg
9257 MonoInst *callee = addr;
9259 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
9261 GSHAREDVT_FAILURE (*ip);
9263 addr = emit_get_rgctx_sig (cfg, context_used,
9264 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
9265 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
9269 /* Generic sharing */
9272 * Use this if the callee is gsharedvt sharable too, since
9273 * at runtime we might find an instantiation so the call cannot
9274 * be patched (the 'no_patch' code path in mini-trampolines.c).
9276 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9277 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9278 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9279 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9280 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9281 INLINE_FAILURE ("gshared");
9283 g_assert (cfg->generic_sharing_context && cmethod);
9287 * We are compiling a call to a
9288 * generic method from shared code,
9289 * which means that we have to look up
9290 * the method in the rgctx and do an
9294 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9296 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9297 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9301 /* Indirect calls */
9303 if (call_opcode == CEE_CALL)
9304 g_assert (context_used);
9305 else if (call_opcode == CEE_CALLI)
9306 g_assert (!vtable_arg);
9308 /* FIXME: what the hell is this??? */
9309 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
9310 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
9312 /* Prevent inlining of methods with indirect calls */
9313 INLINE_FAILURE ("indirect call");
9315 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
9320 * Instead of emitting an indirect call, emit a direct call
9321 * with the contents of the aotconst as the patch info.
9323 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
9324 info_type = addr->inst_c1;
9325 info_data = addr->inst_p0;
9327 info_type = addr->inst_right->inst_c1;
9328 info_data = addr->inst_right->inst_left;
9331 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
9332 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
9337 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9345 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9346 MonoInst *val = sp [fsig->param_count];
9348 if (val->type == STACK_OBJ) {
9349 MonoInst *iargs [2];
9354 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9357 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9358 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9359 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9360 emit_write_barrier (cfg, addr, val);
9361 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9362 GSHAREDVT_FAILURE (*ip);
9363 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9364 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9366 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9367 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9368 if (!cmethod->klass->element_class->valuetype && !readonly)
9369 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9370 CHECK_TYPELOAD (cmethod->klass);
9373 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9376 g_assert_not_reached ();
9383 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9387 /* Tail prefix / tail call optimization */
9389 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9390 /* FIXME: runtime generic context pointer for jumps? */
9391 /* FIXME: handle this for generic sharing eventually */
9392 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
9393 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9394 supported_tail_call = TRUE;
9396 if (supported_tail_call) {
9399 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9400 INLINE_FAILURE ("tail call");
9402 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9404 if (ARCH_HAVE_OP_TAIL_CALL) {
9405 /* Handle tail calls similarly to normal calls */
9408 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9410 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9411 call->tail_call = TRUE;
9412 call->method = cmethod;
9413 call->signature = mono_method_signature (cmethod);
9416 * We implement tail calls by storing the actual arguments into the
9417 * argument variables, then emitting a CEE_JMP.
9419 for (i = 0; i < n; ++i) {
9420 /* Prevent argument from being register allocated */
9421 arg_array [i]->flags |= MONO_INST_VOLATILE;
9422 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9424 ins = (MonoInst*)call;
9425 ins->inst_p0 = cmethod;
9426 ins->inst_p1 = arg_array [0];
9427 MONO_ADD_INS (bblock, ins);
9428 link_bblock (cfg, bblock, end_bblock);
9429 start_new_bblock = 1;
9431 // FIXME: Eliminate unreachable epilogs
9434 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9435 * only reachable from this call.
9437 GET_BBLOCK (cfg, tblock, ip + 5);
9438 if (tblock == bblock || tblock->in_count == 0)
9447 * Synchronized wrappers.
9448 * Its hard to determine where to replace a method with its synchronized
9449 * wrapper without causing an infinite recursion. The current solution is
9450 * to add the synchronized wrapper in the trampolines, and to
9451 * change the called method to a dummy wrapper, and resolve that wrapper
9452 * to the real method in mono_jit_compile_method ().
9454 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9455 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9456 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9457 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9461 INLINE_FAILURE ("call");
9462 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9463 imt_arg, vtable_arg);
9466 link_bblock (cfg, bblock, end_bblock);
9467 start_new_bblock = 1;
9469 // FIXME: Eliminate unreachable epilogs
9472 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9473 * only reachable from this call.
9475 GET_BBLOCK (cfg, tblock, ip + 5);
9476 if (tblock == bblock || tblock->in_count == 0)
9483 /* End of call, INS should contain the result of the call, if any */
9485 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9488 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9493 if (keep_this_alive) {
9494 MonoInst *dummy_use;
9496 /* See mono_emit_method_call_full () */
9497 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9500 CHECK_CFG_EXCEPTION;
9504 g_assert (*ip == CEE_RET);
9508 constrained_class = NULL;
9510 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9514 if (cfg->method != method) {
9515 /* return from inlined method */
9517 * If in_count == 0, that means the ret is unreachable due to
9518 * being preceeded by a throw. In that case, inline_method () will
9519 * handle setting the return value
9520 * (test case: test_0_inline_throw ()).
9522 if (return_var && cfg->cbb->in_count) {
9523 MonoType *ret_type = mono_method_signature (method)->ret;
9529 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9532 //g_assert (returnvar != -1);
9533 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9534 cfg->ret_var_set = TRUE;
9537 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9539 if (cfg->lmf_var && cfg->cbb->in_count)
9543 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9545 if (seq_points && !sym_seq_points) {
9547 * Place a seq point here too even through the IL stack is not
9548 * empty, so a step over on
9551 * will work correctly.
9553 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9554 MONO_ADD_INS (cfg->cbb, ins);
9557 g_assert (!return_var);
9561 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9564 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9567 if (!cfg->vret_addr) {
9570 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9572 EMIT_NEW_RETLOADA (cfg, ret_addr);
9574 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9575 ins->klass = mono_class_from_mono_type (ret_type);
9578 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9579 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9580 MonoInst *iargs [1];
9584 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9585 mono_arch_emit_setret (cfg, method, conv);
9587 mono_arch_emit_setret (cfg, method, *sp);
9590 mono_arch_emit_setret (cfg, method, *sp);
9595 if (sp != stack_start)
9597 MONO_INST_NEW (cfg, ins, OP_BR);
9599 ins->inst_target_bb = end_bblock;
9600 MONO_ADD_INS (bblock, ins);
9601 link_bblock (cfg, bblock, end_bblock);
9602 start_new_bblock = 1;
9606 MONO_INST_NEW (cfg, ins, OP_BR);
9608 target = ip + 1 + (signed char)(*ip);
9610 GET_BBLOCK (cfg, tblock, target);
9611 link_bblock (cfg, bblock, tblock);
9612 ins->inst_target_bb = tblock;
9613 if (sp != stack_start) {
9614 handle_stack_args (cfg, stack_start, sp - stack_start);
9616 CHECK_UNVERIFIABLE (cfg);
9618 MONO_ADD_INS (bblock, ins);
9619 start_new_bblock = 1;
9620 inline_costs += BRANCH_COST;
9634 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9636 target = ip + 1 + *(signed char*)ip;
9642 inline_costs += BRANCH_COST;
9646 MONO_INST_NEW (cfg, ins, OP_BR);
9649 target = ip + 4 + (gint32)read32(ip);
9651 GET_BBLOCK (cfg, tblock, target);
9652 link_bblock (cfg, bblock, tblock);
9653 ins->inst_target_bb = tblock;
9654 if (sp != stack_start) {
9655 handle_stack_args (cfg, stack_start, sp - stack_start);
9657 CHECK_UNVERIFIABLE (cfg);
9660 MONO_ADD_INS (bblock, ins);
9662 start_new_bblock = 1;
9663 inline_costs += BRANCH_COST;
9670 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9671 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9672 guint32 opsize = is_short ? 1 : 4;
9674 CHECK_OPSIZE (opsize);
9676 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9679 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9684 GET_BBLOCK (cfg, tblock, target);
9685 link_bblock (cfg, bblock, tblock);
9686 GET_BBLOCK (cfg, tblock, ip);
9687 link_bblock (cfg, bblock, tblock);
9689 if (sp != stack_start) {
9690 handle_stack_args (cfg, stack_start, sp - stack_start);
9691 CHECK_UNVERIFIABLE (cfg);
9694 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9695 cmp->sreg1 = sp [0]->dreg;
9696 type_from_op (cfg, cmp, sp [0], NULL);
9699 #if SIZEOF_REGISTER == 4
9700 if (cmp->opcode == OP_LCOMPARE_IMM) {
9701 /* Convert it to OP_LCOMPARE */
9702 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9703 ins->type = STACK_I8;
9704 ins->dreg = alloc_dreg (cfg, STACK_I8);
9706 MONO_ADD_INS (bblock, ins);
9707 cmp->opcode = OP_LCOMPARE;
9708 cmp->sreg2 = ins->dreg;
9711 MONO_ADD_INS (bblock, cmp);
9713 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9714 type_from_op (cfg, ins, sp [0], NULL);
9715 MONO_ADD_INS (bblock, ins);
9716 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9717 GET_BBLOCK (cfg, tblock, target);
9718 ins->inst_true_bb = tblock;
9719 GET_BBLOCK (cfg, tblock, ip);
9720 ins->inst_false_bb = tblock;
9721 start_new_bblock = 2;
9724 inline_costs += BRANCH_COST;
9739 MONO_INST_NEW (cfg, ins, *ip);
9741 target = ip + 4 + (gint32)read32(ip);
9747 inline_costs += BRANCH_COST;
9751 MonoBasicBlock **targets;
9752 MonoBasicBlock *default_bblock;
9753 MonoJumpInfoBBTable *table;
9754 int offset_reg = alloc_preg (cfg);
9755 int target_reg = alloc_preg (cfg);
9756 int table_reg = alloc_preg (cfg);
9757 int sum_reg = alloc_preg (cfg);
9758 gboolean use_op_switch;
9762 n = read32 (ip + 1);
9765 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9769 CHECK_OPSIZE (n * sizeof (guint32));
9770 target = ip + n * sizeof (guint32);
9772 GET_BBLOCK (cfg, default_bblock, target);
9773 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9775 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9776 for (i = 0; i < n; ++i) {
9777 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9778 targets [i] = tblock;
9779 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9783 if (sp != stack_start) {
9785 * Link the current bb with the targets as well, so handle_stack_args
9786 * will set their in_stack correctly.
9788 link_bblock (cfg, bblock, default_bblock);
9789 for (i = 0; i < n; ++i)
9790 link_bblock (cfg, bblock, targets [i]);
9792 handle_stack_args (cfg, stack_start, sp - stack_start);
9794 CHECK_UNVERIFIABLE (cfg);
9797 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9798 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9801 for (i = 0; i < n; ++i)
9802 link_bblock (cfg, bblock, targets [i]);
9804 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9805 table->table = targets;
9806 table->table_size = n;
9808 use_op_switch = FALSE;
9810 /* ARM implements SWITCH statements differently */
9811 /* FIXME: Make it use the generic implementation */
9812 if (!cfg->compile_aot)
9813 use_op_switch = TRUE;
9816 if (COMPILE_LLVM (cfg))
9817 use_op_switch = TRUE;
9819 cfg->cbb->has_jump_table = 1;
9821 if (use_op_switch) {
9822 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9823 ins->sreg1 = src1->dreg;
9824 ins->inst_p0 = table;
9825 ins->inst_many_bb = targets;
9826 ins->klass = GUINT_TO_POINTER (n);
9827 MONO_ADD_INS (cfg->cbb, ins);
9829 if (sizeof (gpointer) == 8)
9830 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9832 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9834 #if SIZEOF_REGISTER == 8
9835 /* The upper word might not be zero, and we add it to a 64 bit address later */
9836 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9839 if (cfg->compile_aot) {
9840 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9842 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9843 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9844 ins->inst_p0 = table;
9845 ins->dreg = table_reg;
9846 MONO_ADD_INS (cfg->cbb, ins);
9849 /* FIXME: Use load_memindex */
9850 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9851 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9852 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9854 start_new_bblock = 1;
9855 inline_costs += (BRANCH_COST * 2);
9875 dreg = alloc_freg (cfg);
9878 dreg = alloc_lreg (cfg);
9881 dreg = alloc_ireg_ref (cfg);
9884 dreg = alloc_preg (cfg);
9887 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9888 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9889 if (*ip == CEE_LDIND_R4)
9890 ins->type = cfg->r4_stack_type;
9891 ins->flags |= ins_flag;
9892 MONO_ADD_INS (bblock, ins);
9894 if (ins_flag & MONO_INST_VOLATILE) {
9895 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9896 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9912 if (ins_flag & MONO_INST_VOLATILE) {
9913 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9914 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9917 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9918 ins->flags |= ins_flag;
9921 MONO_ADD_INS (bblock, ins);
9923 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9924 emit_write_barrier (cfg, sp [0], sp [1]);
9933 MONO_INST_NEW (cfg, ins, (*ip));
9935 ins->sreg1 = sp [0]->dreg;
9936 ins->sreg2 = sp [1]->dreg;
9937 type_from_op (cfg, ins, sp [0], sp [1]);
9939 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9941 /* Use the immediate opcodes if possible */
9942 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9943 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9944 if (imm_opcode != -1) {
9945 ins->opcode = imm_opcode;
9946 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9949 NULLIFY_INS (sp [1]);
9953 MONO_ADD_INS ((cfg)->cbb, (ins));
9955 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
9972 MONO_INST_NEW (cfg, ins, (*ip));
9974 ins->sreg1 = sp [0]->dreg;
9975 ins->sreg2 = sp [1]->dreg;
9976 type_from_op (cfg, ins, sp [0], sp [1]);
9978 add_widen_op (cfg, ins, &sp [0], &sp [1]);
9979 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9981 /* FIXME: Pass opcode to is_inst_imm */
9983 /* Use the immediate opcodes if possible */
9984 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9987 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9988 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9989 /* Keep emulated opcodes which are optimized away later */
9990 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9991 imm_opcode = mono_op_to_op_imm (ins->opcode);
9994 if (imm_opcode != -1) {
9995 ins->opcode = imm_opcode;
9996 if (sp [1]->opcode == OP_I8CONST) {
9997 #if SIZEOF_REGISTER == 8
9998 ins->inst_imm = sp [1]->inst_l;
10000 ins->inst_ls_word = sp [1]->inst_ls_word;
10001 ins->inst_ms_word = sp [1]->inst_ms_word;
10005 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10008 /* Might be followed by an instruction added by add_widen_op */
10009 if (sp [1]->next == NULL)
10010 NULLIFY_INS (sp [1]);
10013 MONO_ADD_INS ((cfg)->cbb, (ins));
10015 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10028 case CEE_CONV_OVF_I8:
10029 case CEE_CONV_OVF_U8:
10030 case CEE_CONV_R_UN:
10033 /* Special case this earlier so we have long constants in the IR */
10034 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10035 int data = sp [-1]->inst_c0;
10036 sp [-1]->opcode = OP_I8CONST;
10037 sp [-1]->type = STACK_I8;
10038 #if SIZEOF_REGISTER == 8
10039 if ((*ip) == CEE_CONV_U8)
10040 sp [-1]->inst_c0 = (guint32)data;
10042 sp [-1]->inst_c0 = data;
10044 sp [-1]->inst_ls_word = data;
10045 if ((*ip) == CEE_CONV_U8)
10046 sp [-1]->inst_ms_word = 0;
10048 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10050 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10057 case CEE_CONV_OVF_I4:
10058 case CEE_CONV_OVF_I1:
10059 case CEE_CONV_OVF_I2:
10060 case CEE_CONV_OVF_I:
10061 case CEE_CONV_OVF_U:
10064 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10065 ADD_UNOP (CEE_CONV_OVF_I8);
10072 case CEE_CONV_OVF_U1:
10073 case CEE_CONV_OVF_U2:
10074 case CEE_CONV_OVF_U4:
10077 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10078 ADD_UNOP (CEE_CONV_OVF_U8);
10085 case CEE_CONV_OVF_I1_UN:
10086 case CEE_CONV_OVF_I2_UN:
10087 case CEE_CONV_OVF_I4_UN:
10088 case CEE_CONV_OVF_I8_UN:
10089 case CEE_CONV_OVF_U1_UN:
10090 case CEE_CONV_OVF_U2_UN:
10091 case CEE_CONV_OVF_U4_UN:
10092 case CEE_CONV_OVF_U8_UN:
10093 case CEE_CONV_OVF_I_UN:
10094 case CEE_CONV_OVF_U_UN:
10101 CHECK_CFG_EXCEPTION;
10105 case CEE_ADD_OVF_UN:
10107 case CEE_MUL_OVF_UN:
10109 case CEE_SUB_OVF_UN:
10115 GSHAREDVT_FAILURE (*ip);
10118 token = read32 (ip + 1);
10119 klass = mini_get_class (method, token, generic_context);
10120 CHECK_TYPELOAD (klass);
10122 if (generic_class_is_reference_type (cfg, klass)) {
10123 MonoInst *store, *load;
10124 int dreg = alloc_ireg_ref (cfg);
10126 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10127 load->flags |= ins_flag;
10128 MONO_ADD_INS (cfg->cbb, load);
10130 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10131 store->flags |= ins_flag;
10132 MONO_ADD_INS (cfg->cbb, store);
10134 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10135 emit_write_barrier (cfg, sp [0], sp [1]);
10137 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10143 int loc_index = -1;
10149 token = read32 (ip + 1);
10150 klass = mini_get_class (method, token, generic_context);
10151 CHECK_TYPELOAD (klass);
10153 /* Optimize the common ldobj+stloc combination */
10156 loc_index = ip [6];
10163 loc_index = ip [5] - CEE_STLOC_0;
10170 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10171 CHECK_LOCAL (loc_index);
10173 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10174 ins->dreg = cfg->locals [loc_index]->dreg;
10175 ins->flags |= ins_flag;
10178 if (ins_flag & MONO_INST_VOLATILE) {
10179 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10180 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10186 /* Optimize the ldobj+stobj combination */
10187 /* The reference case ends up being a load+store anyway */
10188 /* Skip this if the operation is volatile. */
10189 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10194 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10201 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10202 ins->flags |= ins_flag;
10205 if (ins_flag & MONO_INST_VOLATILE) {
10206 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10207 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10216 CHECK_STACK_OVF (1);
10218 n = read32 (ip + 1);
10220 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10221 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10222 ins->type = STACK_OBJ;
10225 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10226 MonoInst *iargs [1];
10227 char *str = mono_method_get_wrapper_data (method, n);
10229 if (cfg->compile_aot)
10230 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10232 EMIT_NEW_PCONST (cfg, iargs [0], str);
10233 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10235 if (cfg->opt & MONO_OPT_SHARED) {
10236 MonoInst *iargs [3];
10238 if (cfg->compile_aot) {
10239 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10241 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10242 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10243 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10244 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10245 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10247 if (bblock->out_of_line) {
10248 MonoInst *iargs [2];
10250 if (image == mono_defaults.corlib) {
10252 * Avoid relocations in AOT and save some space by using a
10253 * version of helper_ldstr specialized to mscorlib.
10255 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10256 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10258 /* Avoid creating the string object */
10259 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10260 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10261 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10265 if (cfg->compile_aot) {
10266 NEW_LDSTRCONST (cfg, ins, image, n);
10268 MONO_ADD_INS (bblock, ins);
10271 NEW_PCONST (cfg, ins, NULL);
10272 ins->type = STACK_OBJ;
10273 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10275 OUT_OF_MEMORY_FAILURE;
10278 MONO_ADD_INS (bblock, ins);
10287 MonoInst *iargs [2];
10288 MonoMethodSignature *fsig;
10291 MonoInst *vtable_arg = NULL;
10294 token = read32 (ip + 1);
10295 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10296 if (!cmethod || mono_loader_get_last_error ())
10298 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10301 mono_save_token_info (cfg, image, token, cmethod);
10303 if (!mono_class_init (cmethod->klass))
10304 TYPE_LOAD_ERROR (cmethod->klass);
10306 context_used = mini_method_check_context_used (cfg, cmethod);
10308 if (mono_security_cas_enabled ()) {
10309 if (check_linkdemand (cfg, method, cmethod))
10310 INLINE_FAILURE ("linkdemand");
10311 CHECK_CFG_EXCEPTION;
10312 } else if (mono_security_core_clr_enabled ()) {
10313 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10316 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10317 emit_generic_class_init (cfg, cmethod->klass);
10318 CHECK_TYPELOAD (cmethod->klass);
10322 if (cfg->gsharedvt) {
10323 if (mini_is_gsharedvt_variable_signature (sig))
10324 GSHAREDVT_FAILURE (*ip);
10328 n = fsig->param_count;
10332 * Generate smaller code for the common newobj <exception> instruction in
10333 * argument checking code.
10335 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10336 is_exception_class (cmethod->klass) && n <= 2 &&
10337 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10338 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10339 MonoInst *iargs [3];
10343 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10346 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10349 iargs [1] = sp [0];
10350 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10353 iargs [1] = sp [0];
10354 iargs [2] = sp [1];
10355 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10358 g_assert_not_reached ();
10366 /* move the args to allow room for 'this' in the first position */
10372 /* check_call_signature () requires sp[0] to be set */
10373 this_ins.type = STACK_OBJ;
10374 sp [0] = &this_ins;
10375 if (check_call_signature (cfg, fsig, sp))
10380 if (mini_class_is_system_array (cmethod->klass)) {
10381 *sp = emit_get_rgctx_method (cfg, context_used,
10382 cmethod, MONO_RGCTX_INFO_METHOD);
10384 /* Avoid varargs in the common case */
10385 if (fsig->param_count == 1)
10386 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10387 else if (fsig->param_count == 2)
10388 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10389 else if (fsig->param_count == 3)
10390 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10391 else if (fsig->param_count == 4)
10392 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10394 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10395 } else if (cmethod->string_ctor) {
10396 g_assert (!context_used);
10397 g_assert (!vtable_arg);
10398 /* we simply pass a null pointer */
10399 EMIT_NEW_PCONST (cfg, *sp, NULL);
10400 /* now call the string ctor */
10401 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10403 if (cmethod->klass->valuetype) {
10404 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10405 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10406 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10411 * The code generated by mini_emit_virtual_call () expects
10412 * iargs [0] to be a boxed instance, but luckily the vcall
10413 * will be transformed into a normal call there.
10415 } else if (context_used) {
10416 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10419 MonoVTable *vtable = NULL;
10421 if (!cfg->compile_aot)
10422 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10423 CHECK_TYPELOAD (cmethod->klass);
10426 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10427 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10428 * As a workaround, we call class cctors before allocating objects.
10430 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10431 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10432 if (cfg->verbose_level > 2)
10433 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10434 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10437 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10440 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10443 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10445 /* Now call the actual ctor */
10446 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10447 CHECK_CFG_EXCEPTION;
10450 if (alloc == NULL) {
10452 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10453 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10461 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10462 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10465 case CEE_CASTCLASS:
10469 token = read32 (ip + 1);
10470 klass = mini_get_class (method, token, generic_context);
10471 CHECK_TYPELOAD (klass);
10472 if (sp [0]->type != STACK_OBJ)
10475 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10476 CHECK_CFG_EXCEPTION;
10485 token = read32 (ip + 1);
10486 klass = mini_get_class (method, token, generic_context);
10487 CHECK_TYPELOAD (klass);
10488 if (sp [0]->type != STACK_OBJ)
10491 context_used = mini_class_check_context_used (cfg, klass);
10493 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10494 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10495 MonoInst *args [3];
10501 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10504 if (cfg->compile_aot)
10505 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
10507 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10509 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10512 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10513 MonoMethod *mono_isinst;
10514 MonoInst *iargs [1];
10517 mono_isinst = mono_marshal_get_isinst (klass);
10518 iargs [0] = sp [0];
10520 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10521 iargs, ip, cfg->real_offset, TRUE, &bblock);
10522 CHECK_CFG_EXCEPTION;
10523 g_assert (costs > 0);
10526 cfg->real_offset += 5;
10530 inline_costs += costs;
10533 ins = handle_isinst (cfg, klass, *sp, context_used);
10534 CHECK_CFG_EXCEPTION;
10541 case CEE_UNBOX_ANY: {
10542 MonoInst *res, *addr;
10547 token = read32 (ip + 1);
10548 klass = mini_get_class (method, token, generic_context);
10549 CHECK_TYPELOAD (klass);
10551 mono_save_token_info (cfg, image, token, klass);
10553 context_used = mini_class_check_context_used (cfg, klass);
10555 if (mini_is_gsharedvt_klass (cfg, klass)) {
10556 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10558 } else if (generic_class_is_reference_type (cfg, klass)) {
10559 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10560 CHECK_CFG_EXCEPTION;
10561 } else if (mono_class_is_nullable (klass)) {
10562 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10564 addr = handle_unbox (cfg, klass, sp, context_used);
10566 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10577 MonoClass *enum_class;
10578 MonoMethod *has_flag;
10584 token = read32 (ip + 1);
10585 klass = mini_get_class (method, token, generic_context);
10586 CHECK_TYPELOAD (klass);
10588 mono_save_token_info (cfg, image, token, klass);
10590 context_used = mini_class_check_context_used (cfg, klass);
10592 if (generic_class_is_reference_type (cfg, klass)) {
10598 if (klass == mono_defaults.void_class)
10600 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10602 /* frequent check in generic code: box (struct), brtrue */
10607 * <push int/long ptr>
10610 * constrained. MyFlags
10611 * callvirt instance bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10613 * If we find this sequence and the operand types on box and constrained
10614 * are equal, we can emit a specialized instruction sequence instead of
10615 * the very slow HasFlag () call.
10617 if ((cfg->opt & MONO_OPT_INTRINS) &&
10618 /* Cheap checks first. */
10619 ip + 5 + 6 + 5 < end &&
10620 ip [5] == CEE_PREFIX1 &&
10621 ip [6] == CEE_CONSTRAINED_ &&
10622 ip [11] == CEE_CALLVIRT &&
10623 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10624 mono_class_is_enum (klass) &&
10625 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10626 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10627 has_flag->klass == mono_defaults.enum_class &&
10628 !strcmp (has_flag->name, "HasFlag") &&
10629 has_flag->signature->hasthis &&
10630 has_flag->signature->param_count == 1) {
10631 CHECK_TYPELOAD (enum_class);
10633 if (enum_class == klass) {
10634 MonoInst *enum_this, *enum_flag;
10639 enum_this = sp [0];
10640 enum_flag = sp [1];
10642 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10647 // FIXME: LLVM can't handle the inconsistent bb linking
10648 if (!mono_class_is_nullable (klass) &&
10649 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10650 (ip [5] == CEE_BRTRUE ||
10651 ip [5] == CEE_BRTRUE_S ||
10652 ip [5] == CEE_BRFALSE ||
10653 ip [5] == CEE_BRFALSE_S)) {
10654 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10656 MonoBasicBlock *true_bb, *false_bb;
10660 if (cfg->verbose_level > 3) {
10661 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10662 printf ("<box+brtrue opt>\n");
10667 case CEE_BRFALSE_S:
10670 target = ip + 1 + (signed char)(*ip);
10677 target = ip + 4 + (gint)(read32 (ip));
10681 g_assert_not_reached ();
10685 * We need to link both bblocks, since it is needed for handling stack
10686 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10687 * Branching to only one of them would lead to inconsistencies, so
10688 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10690 GET_BBLOCK (cfg, true_bb, target);
10691 GET_BBLOCK (cfg, false_bb, ip);
10693 mono_link_bblock (cfg, cfg->cbb, true_bb);
10694 mono_link_bblock (cfg, cfg->cbb, false_bb);
10696 if (sp != stack_start) {
10697 handle_stack_args (cfg, stack_start, sp - stack_start);
10699 CHECK_UNVERIFIABLE (cfg);
10702 if (COMPILE_LLVM (cfg)) {
10703 dreg = alloc_ireg (cfg);
10704 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10705 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10707 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10709 /* The JIT can't eliminate the iconst+compare */
10710 MONO_INST_NEW (cfg, ins, OP_BR);
10711 ins->inst_target_bb = is_true ? true_bb : false_bb;
10712 MONO_ADD_INS (cfg->cbb, ins);
10715 start_new_bblock = 1;
10719 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10721 CHECK_CFG_EXCEPTION;
10730 token = read32 (ip + 1);
10731 klass = mini_get_class (method, token, generic_context);
10732 CHECK_TYPELOAD (klass);
10734 mono_save_token_info (cfg, image, token, klass);
10736 context_used = mini_class_check_context_used (cfg, klass);
10738 if (mono_class_is_nullable (klass)) {
10741 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10742 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10746 ins = handle_unbox (cfg, klass, sp, context_used);
10759 MonoClassField *field;
10760 #ifndef DISABLE_REMOTING
10764 gboolean is_instance;
10766 gpointer addr = NULL;
10767 gboolean is_special_static;
10769 MonoInst *store_val = NULL;
10770 MonoInst *thread_ins;
10773 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10775 if (op == CEE_STFLD) {
10778 store_val = sp [1];
10783 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10785 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10788 if (op == CEE_STSFLD) {
10791 store_val = sp [0];
10796 token = read32 (ip + 1);
10797 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10798 field = mono_method_get_wrapper_data (method, token);
10799 klass = field->parent;
10802 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10805 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10806 FIELD_ACCESS_FAILURE (method, field);
10807 mono_class_init (klass);
10809 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10812 /* if the class is Critical then transparent code cannot access its fields */
10813 if (!is_instance && mono_security_core_clr_enabled ())
10814 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10816 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10817 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10818 if (mono_security_core_clr_enabled ())
10819 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10823 * LDFLD etc. is usable on static fields as well, so convert those cases to
10826 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10838 g_assert_not_reached ();
10840 is_instance = FALSE;
10843 context_used = mini_class_check_context_used (cfg, klass);
10845 /* INSTANCE CASE */
10847 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10848 if (op == CEE_STFLD) {
10849 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10851 #ifndef DISABLE_REMOTING
10852 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10853 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10854 MonoInst *iargs [5];
10856 GSHAREDVT_FAILURE (op);
10858 iargs [0] = sp [0];
10859 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10860 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10861 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10863 iargs [4] = sp [1];
10865 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10866 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10867 iargs, ip, cfg->real_offset, TRUE, &bblock);
10868 CHECK_CFG_EXCEPTION;
10869 g_assert (costs > 0);
10871 cfg->real_offset += 5;
10873 inline_costs += costs;
10875 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10882 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10884 if (mini_is_gsharedvt_klass (cfg, klass)) {
10885 MonoInst *offset_ins;
10887 context_used = mini_class_check_context_used (cfg, klass);
10889 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10890 dreg = alloc_ireg_mp (cfg);
10891 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10892 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10893 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10895 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10897 if (sp [0]->opcode != OP_LDADDR)
10898 store->flags |= MONO_INST_FAULT;
10900 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10901 /* insert call to write barrier */
10905 dreg = alloc_ireg_mp (cfg);
10906 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10907 emit_write_barrier (cfg, ptr, sp [1]);
10910 store->flags |= ins_flag;
10917 #ifndef DISABLE_REMOTING
10918 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10919 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10920 MonoInst *iargs [4];
10922 GSHAREDVT_FAILURE (op);
10924 iargs [0] = sp [0];
10925 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10926 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10927 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10928 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10929 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10930 iargs, ip, cfg->real_offset, TRUE, &bblock);
10931 CHECK_CFG_EXCEPTION;
10932 g_assert (costs > 0);
10934 cfg->real_offset += 5;
10938 inline_costs += costs;
10940 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10946 if (sp [0]->type == STACK_VTYPE) {
10949 /* Have to compute the address of the variable */
10951 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10953 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10955 g_assert (var->klass == klass);
10957 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10961 if (op == CEE_LDFLDA) {
10962 if (is_magic_tls_access (field)) {
10963 GSHAREDVT_FAILURE (*ip);
10965 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10967 if (sp [0]->type == STACK_OBJ) {
10968 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10969 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10972 dreg = alloc_ireg_mp (cfg);
10974 if (mini_is_gsharedvt_klass (cfg, klass)) {
10975 MonoInst *offset_ins;
10977 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10978 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10980 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10982 ins->klass = mono_class_from_mono_type (field->type);
10983 ins->type = STACK_MP;
10989 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10991 if (mini_is_gsharedvt_klass (cfg, klass)) {
10992 MonoInst *offset_ins;
10994 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10995 dreg = alloc_ireg_mp (cfg);
10996 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10997 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10999 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11001 load->flags |= ins_flag;
11002 if (sp [0]->opcode != OP_LDADDR)
11003 load->flags |= MONO_INST_FAULT;
11017 * We can only support shared generic static
11018 * field access on architectures where the
11019 * trampoline code has been extended to handle
11020 * the generic class init.
11022 #ifndef MONO_ARCH_VTABLE_REG
11023 GENERIC_SHARING_FAILURE (op);
11026 context_used = mini_class_check_context_used (cfg, klass);
11028 ftype = mono_field_get_type (field);
11030 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11033 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11034 * to be called here.
11036 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11037 mono_class_vtable (cfg->domain, klass);
11038 CHECK_TYPELOAD (klass);
11040 mono_domain_lock (cfg->domain);
11041 if (cfg->domain->special_static_fields)
11042 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11043 mono_domain_unlock (cfg->domain);
11045 is_special_static = mono_class_field_is_special_static (field);
11047 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11048 thread_ins = mono_get_thread_intrinsic (cfg);
11052 /* Generate IR to compute the field address */
11053 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11055 * Fast access to TLS data
11056 * Inline version of get_thread_static_data () in
11060 int idx, static_data_reg, array_reg, dreg;
11062 GSHAREDVT_FAILURE (op);
11064 // offset &= 0x7fffffff;
11065 // idx = (offset >> 24) - 1;
11066 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
11067 MONO_ADD_INS (cfg->cbb, thread_ins);
11068 static_data_reg = alloc_ireg (cfg);
11069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11071 if (cfg->compile_aot) {
11072 int offset_reg, offset2_reg, idx_reg;
11074 /* For TLS variables, this will return the TLS offset */
11075 EMIT_NEW_SFLDACONST (cfg, ins, field);
11076 offset_reg = ins->dreg;
11077 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11078 idx_reg = alloc_ireg (cfg);
11079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
11080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
11081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11082 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11083 array_reg = alloc_ireg (cfg);
11084 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11085 offset2_reg = alloc_ireg (cfg);
11086 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
11087 dreg = alloc_ireg (cfg);
11088 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11090 offset = (gsize)addr & 0x7fffffff;
11091 idx = (offset >> 24) - 1;
11093 array_reg = alloc_ireg (cfg);
11094 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11095 dreg = alloc_ireg (cfg);
11096 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
11098 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11099 (cfg->compile_aot && is_special_static) ||
11100 (context_used && is_special_static)) {
11101 MonoInst *iargs [2];
11103 g_assert (field->parent);
11104 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11105 if (context_used) {
11106 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11107 field, MONO_RGCTX_INFO_CLASS_FIELD);
11109 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11111 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11112 } else if (context_used) {
11113 MonoInst *static_data;
11116 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11117 method->klass->name_space, method->klass->name, method->name,
11118 depth, field->offset);
11121 if (mono_class_needs_cctor_run (klass, method))
11122 emit_generic_class_init (cfg, klass);
11125 * The pointer we're computing here is
11127 * super_info.static_data + field->offset
11129 static_data = emit_get_rgctx_klass (cfg, context_used,
11130 klass, MONO_RGCTX_INFO_STATIC_DATA);
11132 if (mini_is_gsharedvt_klass (cfg, klass)) {
11133 MonoInst *offset_ins;
11135 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11136 dreg = alloc_ireg_mp (cfg);
11137 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11138 } else if (field->offset == 0) {
11141 int addr_reg = mono_alloc_preg (cfg);
11142 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11144 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11145 MonoInst *iargs [2];
11147 g_assert (field->parent);
11148 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11149 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11150 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11152 MonoVTable *vtable = NULL;
11154 if (!cfg->compile_aot)
11155 vtable = mono_class_vtable (cfg->domain, klass);
11156 CHECK_TYPELOAD (klass);
11159 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11160 if (!(g_slist_find (class_inits, klass))) {
11161 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11162 if (cfg->verbose_level > 2)
11163 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11164 class_inits = g_slist_prepend (class_inits, klass);
11167 if (cfg->run_cctors) {
11169 /* This makes so that inline cannot trigger */
11170 /* .cctors: too many apps depend on them */
11171 /* running with a specific order... */
11173 if (! vtable->initialized)
11174 INLINE_FAILURE ("class init");
11175 ex = mono_runtime_class_init_full (vtable, FALSE);
11177 set_exception_object (cfg, ex);
11178 goto exception_exit;
11182 if (cfg->compile_aot)
11183 EMIT_NEW_SFLDACONST (cfg, ins, field);
11186 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11188 EMIT_NEW_PCONST (cfg, ins, addr);
11191 MonoInst *iargs [1];
11192 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11193 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11197 /* Generate IR to do the actual load/store operation */
11199 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11200 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11201 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11204 if (op == CEE_LDSFLDA) {
11205 ins->klass = mono_class_from_mono_type (ftype);
11206 ins->type = STACK_PTR;
11208 } else if (op == CEE_STSFLD) {
11211 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11212 store->flags |= ins_flag;
11214 gboolean is_const = FALSE;
11215 MonoVTable *vtable = NULL;
11216 gpointer addr = NULL;
11218 if (!context_used) {
11219 vtable = mono_class_vtable (cfg->domain, klass);
11220 CHECK_TYPELOAD (klass);
11222 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11223 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11224 int ro_type = ftype->type;
11226 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11227 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11228 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11231 GSHAREDVT_FAILURE (op);
11233 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11236 case MONO_TYPE_BOOLEAN:
11238 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11242 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11245 case MONO_TYPE_CHAR:
11247 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11251 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11256 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11260 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11265 case MONO_TYPE_PTR:
11266 case MONO_TYPE_FNPTR:
11267 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11268 type_to_eval_stack_type ((cfg), field->type, *sp);
11271 case MONO_TYPE_STRING:
11272 case MONO_TYPE_OBJECT:
11273 case MONO_TYPE_CLASS:
11274 case MONO_TYPE_SZARRAY:
11275 case MONO_TYPE_ARRAY:
11276 if (!mono_gc_is_moving ()) {
11277 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11278 type_to_eval_stack_type ((cfg), field->type, *sp);
11286 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11291 case MONO_TYPE_VALUETYPE:
11301 CHECK_STACK_OVF (1);
11303 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11304 load->flags |= ins_flag;
11310 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11311 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11312 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11323 token = read32 (ip + 1);
11324 klass = mini_get_class (method, token, generic_context);
11325 CHECK_TYPELOAD (klass);
11326 if (ins_flag & MONO_INST_VOLATILE) {
11327 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11328 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11330 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11331 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11332 ins->flags |= ins_flag;
11333 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11334 generic_class_is_reference_type (cfg, klass)) {
11335 /* insert call to write barrier */
11336 emit_write_barrier (cfg, sp [0], sp [1]);
11348 const char *data_ptr;
11350 guint32 field_token;
11356 token = read32 (ip + 1);
11358 klass = mini_get_class (method, token, generic_context);
11359 CHECK_TYPELOAD (klass);
11361 context_used = mini_class_check_context_used (cfg, klass);
11363 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11364 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11365 ins->sreg1 = sp [0]->dreg;
11366 ins->type = STACK_I4;
11367 ins->dreg = alloc_ireg (cfg);
11368 MONO_ADD_INS (cfg->cbb, ins);
11369 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11372 if (context_used) {
11373 MonoInst *args [3];
11374 MonoClass *array_class = mono_array_class_get (klass, 1);
11375 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11377 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11380 args [0] = emit_get_rgctx_klass (cfg, context_used,
11381 array_class, MONO_RGCTX_INFO_VTABLE);
11386 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11388 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11390 if (cfg->opt & MONO_OPT_SHARED) {
11391 /* Decompose now to avoid problems with references to the domainvar */
11392 MonoInst *iargs [3];
11394 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11395 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11396 iargs [2] = sp [0];
11398 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11400 /* Decompose later since it is needed by abcrem */
11401 MonoClass *array_type = mono_array_class_get (klass, 1);
11402 mono_class_vtable (cfg->domain, array_type);
11403 CHECK_TYPELOAD (array_type);
11405 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11406 ins->dreg = alloc_ireg_ref (cfg);
11407 ins->sreg1 = sp [0]->dreg;
11408 ins->inst_newa_class = klass;
11409 ins->type = STACK_OBJ;
11410 ins->klass = array_type;
11411 MONO_ADD_INS (cfg->cbb, ins);
11412 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11413 cfg->cbb->has_array_access = TRUE;
11415 /* Needed so mono_emit_load_get_addr () gets called */
11416 mono_get_got_var (cfg);
11426 * we inline/optimize the initialization sequence if possible.
11427 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11428 * for small sizes open code the memcpy
11429 * ensure the rva field is big enough
11431 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11432 MonoMethod *memcpy_method = get_memcpy_method ();
11433 MonoInst *iargs [3];
11434 int add_reg = alloc_ireg_mp (cfg);
11436 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11437 if (cfg->compile_aot) {
11438 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11440 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11442 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11443 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11452 if (sp [0]->type != STACK_OBJ)
11455 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11456 ins->dreg = alloc_preg (cfg);
11457 ins->sreg1 = sp [0]->dreg;
11458 ins->type = STACK_I4;
11459 /* This flag will be inherited by the decomposition */
11460 ins->flags |= MONO_INST_FAULT;
11461 MONO_ADD_INS (cfg->cbb, ins);
11462 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11463 cfg->cbb->has_array_access = TRUE;
11471 if (sp [0]->type != STACK_OBJ)
11474 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11476 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11477 CHECK_TYPELOAD (klass);
11478 /* we need to make sure that this array is exactly the type it needs
11479 * to be for correctness. the wrappers are lax with their usage
11480 * so we need to ignore them here
11482 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11483 MonoClass *array_class = mono_array_class_get (klass, 1);
11484 mini_emit_check_array_type (cfg, sp [0], array_class);
11485 CHECK_TYPELOAD (array_class);
11489 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11494 case CEE_LDELEM_I1:
11495 case CEE_LDELEM_U1:
11496 case CEE_LDELEM_I2:
11497 case CEE_LDELEM_U2:
11498 case CEE_LDELEM_I4:
11499 case CEE_LDELEM_U4:
11500 case CEE_LDELEM_I8:
11502 case CEE_LDELEM_R4:
11503 case CEE_LDELEM_R8:
11504 case CEE_LDELEM_REF: {
11510 if (*ip == CEE_LDELEM) {
11512 token = read32 (ip + 1);
11513 klass = mini_get_class (method, token, generic_context);
11514 CHECK_TYPELOAD (klass);
11515 mono_class_init (klass);
11518 klass = array_access_to_klass (*ip);
11520 if (sp [0]->type != STACK_OBJ)
11523 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11525 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11526 // FIXME-VT: OP_ICONST optimization
11527 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11528 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11529 ins->opcode = OP_LOADV_MEMBASE;
11530 } else if (sp [1]->opcode == OP_ICONST) {
11531 int array_reg = sp [0]->dreg;
11532 int index_reg = sp [1]->dreg;
11533 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11535 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11536 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11538 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11539 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11542 if (*ip == CEE_LDELEM)
11549 case CEE_STELEM_I1:
11550 case CEE_STELEM_I2:
11551 case CEE_STELEM_I4:
11552 case CEE_STELEM_I8:
11553 case CEE_STELEM_R4:
11554 case CEE_STELEM_R8:
11555 case CEE_STELEM_REF:
11560 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11562 if (*ip == CEE_STELEM) {
11564 token = read32 (ip + 1);
11565 klass = mini_get_class (method, token, generic_context);
11566 CHECK_TYPELOAD (klass);
11567 mono_class_init (klass);
11570 klass = array_access_to_klass (*ip);
11572 if (sp [0]->type != STACK_OBJ)
11575 emit_array_store (cfg, klass, sp, TRUE);
11577 if (*ip == CEE_STELEM)
11584 case CEE_CKFINITE: {
11588 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11589 ins->sreg1 = sp [0]->dreg;
11590 ins->dreg = alloc_freg (cfg);
11591 ins->type = STACK_R8;
11592 MONO_ADD_INS (bblock, ins);
11594 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11599 case CEE_REFANYVAL: {
11600 MonoInst *src_var, *src;
11602 int klass_reg = alloc_preg (cfg);
11603 int dreg = alloc_preg (cfg);
11605 GSHAREDVT_FAILURE (*ip);
11608 MONO_INST_NEW (cfg, ins, *ip);
11611 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11612 CHECK_TYPELOAD (klass);
11614 context_used = mini_class_check_context_used (cfg, klass);
11617 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11619 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11620 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11623 if (context_used) {
11624 MonoInst *klass_ins;
11626 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11627 klass, MONO_RGCTX_INFO_KLASS);
11630 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11631 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11633 mini_emit_class_check (cfg, klass_reg, klass);
11635 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11636 ins->type = STACK_MP;
11641 case CEE_MKREFANY: {
11642 MonoInst *loc, *addr;
11644 GSHAREDVT_FAILURE (*ip);
11647 MONO_INST_NEW (cfg, ins, *ip);
11650 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11651 CHECK_TYPELOAD (klass);
11653 context_used = mini_class_check_context_used (cfg, klass);
11655 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11656 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11658 if (context_used) {
11659 MonoInst *const_ins;
11660 int type_reg = alloc_preg (cfg);
11662 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11665 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11666 } else if (cfg->compile_aot) {
11667 int const_reg = alloc_preg (cfg);
11668 int type_reg = alloc_preg (cfg);
11670 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11673 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11675 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11676 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11680 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11681 ins->type = STACK_VTYPE;
11682 ins->klass = mono_defaults.typed_reference_class;
11687 case CEE_LDTOKEN: {
11689 MonoClass *handle_class;
11691 CHECK_STACK_OVF (1);
11694 n = read32 (ip + 1);
11696 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11697 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11698 handle = mono_method_get_wrapper_data (method, n);
11699 handle_class = mono_method_get_wrapper_data (method, n + 1);
11700 if (handle_class == mono_defaults.typehandle_class)
11701 handle = &((MonoClass*)handle)->byval_arg;
11704 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11709 mono_class_init (handle_class);
11710 if (cfg->generic_sharing_context) {
11711 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11712 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11713 /* This case handles ldtoken
11714 of an open type, like for
11717 } else if (handle_class == mono_defaults.typehandle_class) {
11718 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11719 } else if (handle_class == mono_defaults.fieldhandle_class)
11720 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11721 else if (handle_class == mono_defaults.methodhandle_class)
11722 context_used = mini_method_check_context_used (cfg, handle);
11724 g_assert_not_reached ();
11727 if ((cfg->opt & MONO_OPT_SHARED) &&
11728 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11729 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11730 MonoInst *addr, *vtvar, *iargs [3];
11731 int method_context_used;
11733 method_context_used = mini_method_check_context_used (cfg, method);
11735 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11737 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11738 EMIT_NEW_ICONST (cfg, iargs [1], n);
11739 if (method_context_used) {
11740 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11741 method, MONO_RGCTX_INFO_METHOD);
11742 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11744 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11745 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11747 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11751 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11753 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11754 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11755 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11756 (cmethod->klass == mono_defaults.systemtype_class) &&
11757 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11758 MonoClass *tclass = mono_class_from_mono_type (handle);
11760 mono_class_init (tclass);
11761 if (context_used) {
11762 ins = emit_get_rgctx_klass (cfg, context_used,
11763 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11764 } else if (cfg->compile_aot) {
11765 if (method->wrapper_type) {
11766 mono_error_init (&error); //got to do it since there are multiple conditionals below
11767 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11768 /* Special case for static synchronized wrappers */
11769 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11771 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11772 /* FIXME: n is not a normal token */
11774 EMIT_NEW_PCONST (cfg, ins, NULL);
11777 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11780 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11782 ins->type = STACK_OBJ;
11783 ins->klass = cmethod->klass;
11786 MonoInst *addr, *vtvar;
11788 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11790 if (context_used) {
11791 if (handle_class == mono_defaults.typehandle_class) {
11792 ins = emit_get_rgctx_klass (cfg, context_used,
11793 mono_class_from_mono_type (handle),
11794 MONO_RGCTX_INFO_TYPE);
11795 } else if (handle_class == mono_defaults.methodhandle_class) {
11796 ins = emit_get_rgctx_method (cfg, context_used,
11797 handle, MONO_RGCTX_INFO_METHOD);
11798 } else if (handle_class == mono_defaults.fieldhandle_class) {
11799 ins = emit_get_rgctx_field (cfg, context_used,
11800 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11802 g_assert_not_reached ();
11804 } else if (cfg->compile_aot) {
11805 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11807 EMIT_NEW_PCONST (cfg, ins, handle);
11809 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11810 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11811 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11821 MONO_INST_NEW (cfg, ins, OP_THROW);
11823 ins->sreg1 = sp [0]->dreg;
11825 bblock->out_of_line = TRUE;
11826 MONO_ADD_INS (bblock, ins);
11827 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11828 MONO_ADD_INS (bblock, ins);
11831 link_bblock (cfg, bblock, end_bblock);
11832 start_new_bblock = 1;
11834 case CEE_ENDFINALLY:
11835 /* mono_save_seq_point_info () depends on this */
11836 if (sp != stack_start)
11837 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11838 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11839 MONO_ADD_INS (bblock, ins);
11841 start_new_bblock = 1;
11844 * Control will leave the method so empty the stack, otherwise
11845 * the next basic block will start with a nonempty stack.
11847 while (sp != stack_start) {
11852 case CEE_LEAVE_S: {
11855 if (*ip == CEE_LEAVE) {
11857 target = ip + 5 + (gint32)read32(ip + 1);
11860 target = ip + 2 + (signed char)(ip [1]);
11863 /* empty the stack */
11864 while (sp != stack_start) {
11869 * If this leave statement is in a catch block, check for a
11870 * pending exception, and rethrow it if necessary.
11871 * We avoid doing this in runtime invoke wrappers, since those are called
11872 * by native code which excepts the wrapper to catch all exceptions.
11874 for (i = 0; i < header->num_clauses; ++i) {
11875 MonoExceptionClause *clause = &header->clauses [i];
11878 * Use <= in the final comparison to handle clauses with multiple
11879 * leave statements, like in bug #78024.
11880 * The ordering of the exception clauses guarantees that we find the
11881 * innermost clause.
11883 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11885 MonoBasicBlock *dont_throw;
11890 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11893 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11895 NEW_BBLOCK (cfg, dont_throw);
11898 * Currently, we always rethrow the abort exception, despite the
11899 * fact that this is not correct. See thread6.cs for an example.
11900 * But propagating the abort exception is more important than
11901 * getting the sematics right.
11903 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11904 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11905 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11907 MONO_START_BB (cfg, dont_throw);
11912 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11914 MonoExceptionClause *clause;
11916 for (tmp = handlers; tmp; tmp = tmp->next) {
11917 clause = tmp->data;
11918 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11920 link_bblock (cfg, bblock, tblock);
11921 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11922 ins->inst_target_bb = tblock;
11923 ins->inst_eh_block = clause;
11924 MONO_ADD_INS (bblock, ins);
11925 bblock->has_call_handler = 1;
11926 if (COMPILE_LLVM (cfg)) {
11927 MonoBasicBlock *target_bb;
11930 * Link the finally bblock with the target, since it will
11931 * conceptually branch there.
11932 * FIXME: Have to link the bblock containing the endfinally.
11934 GET_BBLOCK (cfg, target_bb, target);
11935 link_bblock (cfg, tblock, target_bb);
11938 g_list_free (handlers);
11941 MONO_INST_NEW (cfg, ins, OP_BR);
11942 MONO_ADD_INS (bblock, ins);
11943 GET_BBLOCK (cfg, tblock, target);
11944 link_bblock (cfg, bblock, tblock);
11945 ins->inst_target_bb = tblock;
11946 start_new_bblock = 1;
11948 if (*ip == CEE_LEAVE)
11957 * Mono specific opcodes
11959 case MONO_CUSTOM_PREFIX: {
11961 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11965 case CEE_MONO_ICALL: {
11967 MonoJitICallInfo *info;
11969 token = read32 (ip + 2);
11970 func = mono_method_get_wrapper_data (method, token);
11971 info = mono_find_jit_icall_by_addr (func);
11973 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11976 CHECK_STACK (info->sig->param_count);
11977 sp -= info->sig->param_count;
11979 ins = mono_emit_jit_icall (cfg, info->func, sp);
11980 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11984 inline_costs += 10 * num_calls++;
11988 case CEE_MONO_LDPTR: {
11991 CHECK_STACK_OVF (1);
11993 token = read32 (ip + 2);
11995 ptr = mono_method_get_wrapper_data (method, token);
11996 /* FIXME: Generalize this */
11997 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11998 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12003 EMIT_NEW_PCONST (cfg, ins, ptr);
12006 inline_costs += 10 * num_calls++;
12007 /* Can't embed random pointers into AOT code */
12011 case CEE_MONO_JIT_ICALL_ADDR: {
12012 MonoJitICallInfo *callinfo;
12015 CHECK_STACK_OVF (1);
12017 token = read32 (ip + 2);
12019 ptr = mono_method_get_wrapper_data (method, token);
12020 callinfo = mono_find_jit_icall_by_addr (ptr);
12021 g_assert (callinfo);
12022 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12025 inline_costs += 10 * num_calls++;
12028 case CEE_MONO_ICALL_ADDR: {
12029 MonoMethod *cmethod;
12032 CHECK_STACK_OVF (1);
12034 token = read32 (ip + 2);
12036 cmethod = mono_method_get_wrapper_data (method, token);
12038 if (cfg->compile_aot) {
12039 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12041 ptr = mono_lookup_internal_call (cmethod);
12043 EMIT_NEW_PCONST (cfg, ins, ptr);
12049 case CEE_MONO_VTADDR: {
12050 MonoInst *src_var, *src;
12056 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12057 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12062 case CEE_MONO_NEWOBJ: {
12063 MonoInst *iargs [2];
12065 CHECK_STACK_OVF (1);
12067 token = read32 (ip + 2);
12068 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12069 mono_class_init (klass);
12070 NEW_DOMAINCONST (cfg, iargs [0]);
12071 MONO_ADD_INS (cfg->cbb, iargs [0]);
12072 NEW_CLASSCONST (cfg, iargs [1], klass);
12073 MONO_ADD_INS (cfg->cbb, iargs [1]);
12074 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12076 inline_costs += 10 * num_calls++;
12079 case CEE_MONO_OBJADDR:
12082 MONO_INST_NEW (cfg, ins, OP_MOVE);
12083 ins->dreg = alloc_ireg_mp (cfg);
12084 ins->sreg1 = sp [0]->dreg;
12085 ins->type = STACK_MP;
12086 MONO_ADD_INS (cfg->cbb, ins);
12090 case CEE_MONO_LDNATIVEOBJ:
12092 * Similar to LDOBJ, but instead load the unmanaged
12093 * representation of the vtype to the stack.
12098 token = read32 (ip + 2);
12099 klass = mono_method_get_wrapper_data (method, token);
12100 g_assert (klass->valuetype);
12101 mono_class_init (klass);
12104 MonoInst *src, *dest, *temp;
12107 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12108 temp->backend.is_pinvoke = 1;
12109 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12110 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12112 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12113 dest->type = STACK_VTYPE;
12114 dest->klass = klass;
12120 case CEE_MONO_RETOBJ: {
12122 * Same as RET, but return the native representation of a vtype
12125 g_assert (cfg->ret);
12126 g_assert (mono_method_signature (method)->pinvoke);
12131 token = read32 (ip + 2);
12132 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12134 if (!cfg->vret_addr) {
12135 g_assert (cfg->ret_var_is_local);
12137 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12139 EMIT_NEW_RETLOADA (cfg, ins);
12141 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12143 if (sp != stack_start)
12146 MONO_INST_NEW (cfg, ins, OP_BR);
12147 ins->inst_target_bb = end_bblock;
12148 MONO_ADD_INS (bblock, ins);
12149 link_bblock (cfg, bblock, end_bblock);
12150 start_new_bblock = 1;
12154 case CEE_MONO_CISINST:
12155 case CEE_MONO_CCASTCLASS: {
12160 token = read32 (ip + 2);
12161 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12162 if (ip [1] == CEE_MONO_CISINST)
12163 ins = handle_cisinst (cfg, klass, sp [0]);
12165 ins = handle_ccastclass (cfg, klass, sp [0]);
12171 case CEE_MONO_SAVE_LMF:
12172 case CEE_MONO_RESTORE_LMF:
12173 #ifdef MONO_ARCH_HAVE_LMF_OPS
12174 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12175 MONO_ADD_INS (bblock, ins);
12176 cfg->need_lmf_area = TRUE;
12180 case CEE_MONO_CLASSCONST:
12181 CHECK_STACK_OVF (1);
12183 token = read32 (ip + 2);
12184 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12187 inline_costs += 10 * num_calls++;
12189 case CEE_MONO_NOT_TAKEN:
12190 bblock->out_of_line = TRUE;
12193 case CEE_MONO_TLS: {
12196 CHECK_STACK_OVF (1);
12198 key = (gint32)read32 (ip + 2);
12199 g_assert (key < TLS_KEY_NUM);
12201 ins = mono_create_tls_get (cfg, key);
12203 if (cfg->compile_aot) {
12205 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12206 ins->dreg = alloc_preg (cfg);
12207 ins->type = STACK_PTR;
12209 g_assert_not_reached ();
12212 ins->type = STACK_PTR;
12213 MONO_ADD_INS (bblock, ins);
12218 case CEE_MONO_DYN_CALL: {
12219 MonoCallInst *call;
12221 /* It would be easier to call a trampoline, but that would put an
12222 * extra frame on the stack, confusing exception handling. So
12223 * implement it inline using an opcode for now.
12226 if (!cfg->dyn_call_var) {
12227 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12228 /* prevent it from being register allocated */
12229 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12232 /* Has to use a call inst since it local regalloc expects it */
12233 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12234 ins = (MonoInst*)call;
12236 ins->sreg1 = sp [0]->dreg;
12237 ins->sreg2 = sp [1]->dreg;
12238 MONO_ADD_INS (bblock, ins);
12240 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12243 inline_costs += 10 * num_calls++;
12247 case CEE_MONO_MEMORY_BARRIER: {
12249 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12253 case CEE_MONO_JIT_ATTACH: {
12254 MonoInst *args [16], *domain_ins;
12255 MonoInst *ad_ins, *jit_tls_ins;
12256 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12258 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12260 EMIT_NEW_PCONST (cfg, ins, NULL);
12261 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12263 ad_ins = mono_get_domain_intrinsic (cfg);
12264 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12266 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12267 NEW_BBLOCK (cfg, next_bb);
12268 NEW_BBLOCK (cfg, call_bb);
12270 if (cfg->compile_aot) {
12271 /* AOT code is only used in the root domain */
12272 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12274 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12276 MONO_ADD_INS (cfg->cbb, ad_ins);
12277 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12278 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12280 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12284 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12285 MONO_START_BB (cfg, call_bb);
12288 if (cfg->compile_aot) {
12289 /* AOT code is only used in the root domain */
12290 EMIT_NEW_PCONST (cfg, args [0], NULL);
12292 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12294 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12295 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12298 MONO_START_BB (cfg, next_bb);
12304 case CEE_MONO_JIT_DETACH: {
12305 MonoInst *args [16];
12307 /* Restore the original domain */
12308 dreg = alloc_ireg (cfg);
12309 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12310 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12315 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12321 case CEE_PREFIX1: {
12324 case CEE_ARGLIST: {
12325 /* somewhat similar to LDTOKEN */
12326 MonoInst *addr, *vtvar;
12327 CHECK_STACK_OVF (1);
12328 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12330 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12331 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12333 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12334 ins->type = STACK_VTYPE;
12335 ins->klass = mono_defaults.argumenthandle_class;
12345 MonoInst *cmp, *arg1, *arg2;
12353 * The following transforms:
12354 * CEE_CEQ into OP_CEQ
12355 * CEE_CGT into OP_CGT
12356 * CEE_CGT_UN into OP_CGT_UN
12357 * CEE_CLT into OP_CLT
12358 * CEE_CLT_UN into OP_CLT_UN
12360 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12362 MONO_INST_NEW (cfg, ins, cmp->opcode);
12363 cmp->sreg1 = arg1->dreg;
12364 cmp->sreg2 = arg2->dreg;
12365 type_from_op (cfg, cmp, arg1, arg2);
12367 add_widen_op (cfg, cmp, &arg1, &arg2);
12368 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12369 cmp->opcode = OP_LCOMPARE;
12370 else if (arg1->type == STACK_R4)
12371 cmp->opcode = OP_RCOMPARE;
12372 else if (arg1->type == STACK_R8)
12373 cmp->opcode = OP_FCOMPARE;
12375 cmp->opcode = OP_ICOMPARE;
12376 MONO_ADD_INS (bblock, cmp);
12377 ins->type = STACK_I4;
12378 ins->dreg = alloc_dreg (cfg, ins->type);
12379 type_from_op (cfg, ins, arg1, arg2);
12381 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12383 * The backends expect the fceq opcodes to do the
12386 ins->sreg1 = cmp->sreg1;
12387 ins->sreg2 = cmp->sreg2;
12390 MONO_ADD_INS (bblock, ins);
12396 MonoInst *argconst;
12397 MonoMethod *cil_method;
12399 CHECK_STACK_OVF (1);
12401 n = read32 (ip + 2);
12402 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12403 if (!cmethod || mono_loader_get_last_error ())
12405 mono_class_init (cmethod->klass);
12407 mono_save_token_info (cfg, image, n, cmethod);
12409 context_used = mini_method_check_context_used (cfg, cmethod);
12411 cil_method = cmethod;
12412 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12413 METHOD_ACCESS_FAILURE (method, cil_method);
12415 if (mono_security_cas_enabled ()) {
12416 if (check_linkdemand (cfg, method, cmethod))
12417 INLINE_FAILURE ("linkdemand");
12418 CHECK_CFG_EXCEPTION;
12419 } else if (mono_security_core_clr_enabled ()) {
12420 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12424 * Optimize the common case of ldftn+delegate creation
12426 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12427 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12428 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12429 MonoInst *target_ins, *handle_ins;
12430 MonoMethod *invoke;
12431 int invoke_context_used;
12433 invoke = mono_get_delegate_invoke (ctor_method->klass);
12434 if (!invoke || !mono_method_signature (invoke))
12437 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12439 target_ins = sp [-1];
12441 if (mono_security_core_clr_enabled ())
12442 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12444 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12445 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12446 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12448 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12452 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12453 /* FIXME: SGEN support */
12454 if (invoke_context_used == 0) {
12456 if (cfg->verbose_level > 3)
12457 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12458 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12461 CHECK_CFG_EXCEPTION;
12472 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12473 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12477 inline_costs += 10 * num_calls++;
12480 case CEE_LDVIRTFTN: {
12481 MonoInst *args [2];
12485 n = read32 (ip + 2);
12486 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12487 if (!cmethod || mono_loader_get_last_error ())
12489 mono_class_init (cmethod->klass);
12491 context_used = mini_method_check_context_used (cfg, cmethod);
12493 if (mono_security_cas_enabled ()) {
12494 if (check_linkdemand (cfg, method, cmethod))
12495 INLINE_FAILURE ("linkdemand");
12496 CHECK_CFG_EXCEPTION;
12497 } else if (mono_security_core_clr_enabled ()) {
12498 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12502 * Optimize the common case of ldvirtftn+delegate creation
12504 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12505 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12506 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12507 MonoInst *target_ins, *handle_ins;
12508 MonoMethod *invoke;
12509 int invoke_context_used;
12511 invoke = mono_get_delegate_invoke (ctor_method->klass);
12512 if (!invoke || !mono_method_signature (invoke))
12515 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12517 target_ins = sp [-1];
12519 if (mono_security_core_clr_enabled ())
12520 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12522 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12523 /* FIXME: SGEN support */
12524 if (invoke_context_used == 0) {
12526 if (cfg->verbose_level > 3)
12527 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12528 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12531 CHECK_CFG_EXCEPTION;
12545 args [1] = emit_get_rgctx_method (cfg, context_used,
12546 cmethod, MONO_RGCTX_INFO_METHOD);
12549 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12551 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12554 inline_costs += 10 * num_calls++;
12558 CHECK_STACK_OVF (1);
12560 n = read16 (ip + 2);
12562 EMIT_NEW_ARGLOAD (cfg, ins, n);
12567 CHECK_STACK_OVF (1);
12569 n = read16 (ip + 2);
12571 NEW_ARGLOADA (cfg, ins, n);
12572 MONO_ADD_INS (cfg->cbb, ins);
12580 n = read16 (ip + 2);
12582 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12584 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12588 CHECK_STACK_OVF (1);
12590 n = read16 (ip + 2);
12592 EMIT_NEW_LOCLOAD (cfg, ins, n);
12597 unsigned char *tmp_ip;
12598 CHECK_STACK_OVF (1);
12600 n = read16 (ip + 2);
12603 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12609 EMIT_NEW_LOCLOADA (cfg, ins, n);
12618 n = read16 (ip + 2);
12620 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12622 emit_stloc_ir (cfg, sp, header, n);
12629 if (sp != stack_start)
12631 if (cfg->method != method)
12633 * Inlining this into a loop in a parent could lead to
12634 * stack overflows which is different behavior than the
12635 * non-inlined case, thus disable inlining in this case.
12637 INLINE_FAILURE("localloc");
12639 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12640 ins->dreg = alloc_preg (cfg);
12641 ins->sreg1 = sp [0]->dreg;
12642 ins->type = STACK_PTR;
12643 MONO_ADD_INS (cfg->cbb, ins);
12645 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12647 ins->flags |= MONO_INST_INIT;
12652 case CEE_ENDFILTER: {
12653 MonoExceptionClause *clause, *nearest;
12658 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12660 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12661 ins->sreg1 = (*sp)->dreg;
12662 MONO_ADD_INS (bblock, ins);
12663 start_new_bblock = 1;
12667 for (cc = 0; cc < header->num_clauses; ++cc) {
12668 clause = &header->clauses [cc];
12669 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12670 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12671 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12674 g_assert (nearest);
12675 if ((ip - header->code) != nearest->handler_offset)
12680 case CEE_UNALIGNED_:
12681 ins_flag |= MONO_INST_UNALIGNED;
12682 /* FIXME: record alignment? we can assume 1 for now */
12686 case CEE_VOLATILE_:
12687 ins_flag |= MONO_INST_VOLATILE;
12691 ins_flag |= MONO_INST_TAILCALL;
12692 cfg->flags |= MONO_CFG_HAS_TAIL;
12693 /* Can't inline tail calls at this time */
12694 inline_costs += 100000;
12701 token = read32 (ip + 2);
12702 klass = mini_get_class (method, token, generic_context);
12703 CHECK_TYPELOAD (klass);
12704 if (generic_class_is_reference_type (cfg, klass))
12705 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12707 mini_emit_initobj (cfg, *sp, NULL, klass);
12711 case CEE_CONSTRAINED_:
12713 token = read32 (ip + 2);
12714 constrained_class = mini_get_class (method, token, generic_context);
12715 CHECK_TYPELOAD (constrained_class);
12719 case CEE_INITBLK: {
12720 MonoInst *iargs [3];
12724 /* Skip optimized paths for volatile operations. */
12725 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12726 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12727 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12728 /* emit_memset only works when val == 0 */
12729 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12732 iargs [0] = sp [0];
12733 iargs [1] = sp [1];
12734 iargs [2] = sp [2];
12735 if (ip [1] == CEE_CPBLK) {
12737 * FIXME: It's unclear whether we should be emitting both the acquire
12738 * and release barriers for cpblk. It is technically both a load and
12739 * store operation, so it seems like that's the sensible thing to do.
12741 * FIXME: We emit full barriers on both sides of the operation for
12742 * simplicity. We should have a separate atomic memcpy method instead.
12744 MonoMethod *memcpy_method = get_memcpy_method ();
12746 if (ins_flag & MONO_INST_VOLATILE)
12747 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12749 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12750 call->flags |= ins_flag;
12752 if (ins_flag & MONO_INST_VOLATILE)
12753 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12755 MonoMethod *memset_method = get_memset_method ();
12756 if (ins_flag & MONO_INST_VOLATILE) {
12757 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12758 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12760 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12761 call->flags |= ins_flag;
12772 ins_flag |= MONO_INST_NOTYPECHECK;
12774 ins_flag |= MONO_INST_NORANGECHECK;
12775 /* we ignore the no-nullcheck for now since we
12776 * really do it explicitly only when doing callvirt->call
12780 case CEE_RETHROW: {
12782 int handler_offset = -1;
12784 for (i = 0; i < header->num_clauses; ++i) {
12785 MonoExceptionClause *clause = &header->clauses [i];
12786 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12787 handler_offset = clause->handler_offset;
12792 bblock->flags |= BB_EXCEPTION_UNSAFE;
12794 if (handler_offset == -1)
12797 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12798 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12799 ins->sreg1 = load->dreg;
12800 MONO_ADD_INS (bblock, ins);
12802 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12803 MONO_ADD_INS (bblock, ins);
12806 link_bblock (cfg, bblock, end_bblock);
12807 start_new_bblock = 1;
12815 CHECK_STACK_OVF (1);
12817 token = read32 (ip + 2);
12818 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12819 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12822 val = mono_type_size (type, &ialign);
12824 MonoClass *klass = mini_get_class (method, token, generic_context);
12825 CHECK_TYPELOAD (klass);
12827 val = mono_type_size (&klass->byval_arg, &ialign);
12829 if (mini_is_gsharedvt_klass (cfg, klass))
12830 GSHAREDVT_FAILURE (*ip);
12832 EMIT_NEW_ICONST (cfg, ins, val);
12837 case CEE_REFANYTYPE: {
12838 MonoInst *src_var, *src;
12840 GSHAREDVT_FAILURE (*ip);
12846 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12848 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12849 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12850 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12855 case CEE_READONLY_:
12868 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12878 g_warning ("opcode 0x%02x not handled", *ip);
12882 if (start_new_bblock != 1)
12885 bblock->cil_length = ip - bblock->cil_code;
12886 if (bblock->next_bb) {
12887 /* This could already be set because of inlining, #693905 */
12888 MonoBasicBlock *bb = bblock;
12890 while (bb->next_bb)
12892 bb->next_bb = end_bblock;
12894 bblock->next_bb = end_bblock;
12897 if (cfg->method == method && cfg->domainvar) {
12899 MonoInst *get_domain;
12901 cfg->cbb = init_localsbb;
12903 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12904 MONO_ADD_INS (cfg->cbb, get_domain);
12906 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12908 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12909 MONO_ADD_INS (cfg->cbb, store);
12912 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12913 if (cfg->compile_aot)
12914 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12915 mono_get_got_var (cfg);
12918 if (cfg->method == method && cfg->got_var)
12919 mono_emit_load_got_addr (cfg);
12921 if (init_localsbb) {
12922 cfg->cbb = init_localsbb;
12924 for (i = 0; i < header->num_locals; ++i) {
12925 emit_init_local (cfg, i, header->locals [i], init_locals);
12929 if (cfg->init_ref_vars && cfg->method == method) {
12930 /* Emit initialization for ref vars */
12931 // FIXME: Avoid duplication initialization for IL locals.
12932 for (i = 0; i < cfg->num_varinfo; ++i) {
12933 MonoInst *ins = cfg->varinfo [i];
12935 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12936 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12940 if (cfg->lmf_var && cfg->method == method) {
12941 cfg->cbb = init_localsbb;
12942 emit_push_lmf (cfg);
12945 cfg->cbb = init_localsbb;
12946 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12949 MonoBasicBlock *bb;
12952 * Make seq points at backward branch targets interruptable.
12954 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12955 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12956 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12959 /* Add a sequence point for method entry/exit events */
12960 if (cfg->gen_seq_points_debug_data) {
12961 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12962 MONO_ADD_INS (init_localsbb, ins);
12963 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12964 MONO_ADD_INS (cfg->bb_exit, ins);
12968 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12969 * the code they refer to was dead (#11880).
12971 if (sym_seq_points) {
12972 for (i = 0; i < header->code_size; ++i) {
12973 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12976 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12977 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12984 if (cfg->method == method) {
12985 MonoBasicBlock *bb;
12986 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12987 bb->region = mono_find_block_region (cfg, bb->real_offset);
12989 mono_create_spvar_for_region (cfg, bb->region);
12990 if (cfg->verbose_level > 2)
12991 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12995 if (inline_costs < 0) {
12998 /* Method is too large */
12999 mname = mono_method_full_name (method, TRUE);
13000 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13001 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13005 if ((cfg->verbose_level > 2) && (cfg->method == method))
13006 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13011 g_assert (!mono_error_ok (&cfg->error));
13015 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13019 set_exception_type_from_invalid_il (cfg, method, ip);
13023 g_slist_free (class_inits);
13024 mono_basic_block_free (original_bb);
13025 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13026 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13027 if (cfg->exception_type)
13030 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-to-memory opcode whose source is a register
 * (OP_ST*_MEMBASE_REG) to the variant whose source is an immediate
 * (OP_ST*_MEMBASE_IMM), preserving the store width.  Asserts on any
 * opcode that has no immediate counterpart.
 */
13034 store_membase_reg_to_store_membase_imm (int opcode)
13037 case OP_STORE_MEMBASE_REG:
13038 return OP_STORE_MEMBASE_IMM;
13039 case OP_STOREI1_MEMBASE_REG:
13040 return OP_STOREI1_MEMBASE_IMM;
13041 case OP_STOREI2_MEMBASE_REG:
13042 return OP_STOREI2_MEMBASE_IMM;
13043 case OP_STOREI4_MEMBASE_REG:
13044 return OP_STOREI4_MEMBASE_IMM;
13045 case OP_STOREI8_MEMBASE_REG:
13046 return OP_STOREI8_MEMBASE_IMM;
13048 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE: 32 bit (OP_I*) and
 * long (OP_L*) ALU/shift ops, compares, register-source stores, and some
 * x86/amd64 specific opcodes.
 *   NOTE(review): the case labels selecting each return are not visible
 * in this excerpt -- presumably each return is keyed on the corresponding
 * register-operand opcode; confirm against the full switch.
 */
13055 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU, mul/div/rem and shift ops */
13059 return OP_IADD_IMM;
13061 return OP_ISUB_IMM;
13063 return OP_IDIV_IMM;
13065 return OP_IDIV_UN_IMM;
13067 return OP_IREM_IMM;
13069 return OP_IREM_UN_IMM;
13071 return OP_IMUL_IMM;
13073 return OP_IAND_IMM;
13077 return OP_IXOR_IMM;
13079 return OP_ISHL_IMM;
13081 return OP_ISHR_IMM;
13083 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU and shift ops */
13086 return OP_LADD_IMM;
13088 return OP_LSUB_IMM;
13090 return OP_LAND_IMM;
13094 return OP_LXOR_IMM;
13096 return OP_LSHL_IMM;
13098 return OP_LSHR_IMM;
13100 return OP_LSHR_UN_IMM;
13101 #if SIZEOF_REGISTER == 8
13103 return OP_LREM_IMM;
/* compares */
13107 return OP_COMPARE_IMM;
13109 return OP_ICOMPARE_IMM;
13111 return OP_LCOMPARE_IMM;
/* register-source stores -> immediate-source stores */
13113 case OP_STORE_MEMBASE_REG:
13114 return OP_STORE_MEMBASE_IMM;
13115 case OP_STOREI1_MEMBASE_REG:
13116 return OP_STOREI1_MEMBASE_IMM;
13117 case OP_STOREI2_MEMBASE_REG:
13118 return OP_STOREI2_MEMBASE_IMM;
13119 case OP_STOREI4_MEMBASE_REG:
13120 return OP_STOREI4_MEMBASE_IMM;
/* arch specific opcodes */
13122 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13124 return OP_X86_PUSH_IMM;
13125 case OP_X86_COMPARE_MEMBASE_REG:
13126 return OP_X86_COMPARE_MEMBASE_IMM;
13128 #if defined(TARGET_AMD64)
13129 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13130 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* indirect calls with a known target become direct calls */
13132 case OP_VOIDCALL_REG:
13133 return OP_VOIDCALL;
13141 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the mini IR
 * load-from-membase opcode of the same width/signedness; object
 * references (CEE_LDIND_REF) become a pointer-sized OP_LOAD_MEMBASE.
 * Asserts on unknown opcodes.
 */
13148 ldind_to_load_membase (int opcode)
13152 return OP_LOADI1_MEMBASE;
13154 return OP_LOADU1_MEMBASE;
13156 return OP_LOADI2_MEMBASE;
13158 return OP_LOADU2_MEMBASE;
13160 return OP_LOADI4_MEMBASE;
13162 return OP_LOADU4_MEMBASE;
13164 return OP_LOAD_MEMBASE;
13165 case CEE_LDIND_REF:
13166 return OP_LOAD_MEMBASE;
13168 return OP_LOADI8_MEMBASE;
13170 return OP_LOADR4_MEMBASE;
13172 return OP_LOADR8_MEMBASE;
13174 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the mini IR
 * register-source store-to-membase opcode of the same width; object
 * references (CEE_STIND_REF) become a pointer-sized OP_STORE_MEMBASE_REG.
 * Asserts on unknown opcodes.
 */
13181 stind_to_store_membase (int opcode)
13185 return OP_STOREI1_MEMBASE_REG;
13187 return OP_STOREI2_MEMBASE_REG;
13189 return OP_STOREI4_MEMBASE_REG;
13191 case CEE_STIND_REF:
13192 return OP_STORE_MEMBASE_REG;
13194 return OP_STOREI8_MEMBASE_REG;
13196 return OP_STORER4_MEMBASE_REG;
13198 return OP_STORER8_MEMBASE_REG;
13200 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * load variant (OP_LOAD*_MEM).  Only implemented on x86/amd64 (the 64 bit
 * load additionally requires SIZEOF_REGISTER == 8); on other targets the
 * elided fallthrough presumably reports "no such opcode" -- confirm the
 * function's default return in the full source.
 */
13207 mono_load_membase_to_load_mem (int opcode)
13209 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13210 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13212 case OP_LOAD_MEMBASE:
13213 return OP_LOAD_MEM;
13214 case OP_LOADU1_MEMBASE:
13215 return OP_LOADU1_MEM;
13216 case OP_LOADU2_MEMBASE:
13217 return OP_LOADU2_MEM;
13218 case OP_LOADI4_MEMBASE:
13219 return OP_LOADI4_MEM;
13220 case OP_LOADU4_MEMBASE:
13221 return OP_LOADU4_MEM;
13222 #if SIZEOF_REGISTER == 8
13223 case OP_LOADI8_MEMBASE:
13224 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   If an ALU opcode whose result is immediately stored by STORE_OPCODE can
 * be folded into a single x86/amd64 read-modify-write instruction operating
 * directly on memory, return that fused opcode.  The guard checks reject
 * store widths the target cannot fuse (x86: only pointer/32-bit stores,
 * amd64: pointer/32/64-bit stores).
 *   NOTE(review): the case labels selecting each return are elided here --
 * presumably ADD/SUB/AND/OR/XOR and their _IMM forms; confirm against the
 * full switch.
 */
13233 op_to_op_dest_membase (int store_opcode, int opcode)
13235 #if defined(TARGET_X86)
13236 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source RMW forms */
13241 return OP_X86_ADD_MEMBASE_REG;
13243 return OP_X86_SUB_MEMBASE_REG;
13245 return OP_X86_AND_MEMBASE_REG;
13247 return OP_X86_OR_MEMBASE_REG;
13249 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source RMW forms */
13252 return OP_X86_ADD_MEMBASE_IMM;
13255 return OP_X86_SUB_MEMBASE_IMM;
13258 return OP_X86_AND_MEMBASE_IMM;
13261 return OP_X86_OR_MEMBASE_IMM;
13264 return OP_X86_XOR_MEMBASE_IMM;
13270 #if defined(TARGET_AMD64)
13271 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit RMW forms reuse the X86 opcodes */
13276 return OP_X86_ADD_MEMBASE_REG;
13278 return OP_X86_SUB_MEMBASE_REG;
13280 return OP_X86_AND_MEMBASE_REG;
13282 return OP_X86_OR_MEMBASE_REG;
13284 return OP_X86_XOR_MEMBASE_REG;
13286 return OP_X86_ADD_MEMBASE_IMM;
13288 return OP_X86_SUB_MEMBASE_IMM;
13290 return OP_X86_AND_MEMBASE_IMM;
13292 return OP_X86_OR_MEMBASE_IMM;
13294 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit RMW forms */
13296 return OP_AMD64_ADD_MEMBASE_REG;
13298 return OP_AMD64_SUB_MEMBASE_REG;
13300 return OP_AMD64_AND_MEMBASE_REG;
13302 return OP_AMD64_OR_MEMBASE_REG;
13304 return OP_AMD64_XOR_MEMBASE_REG;
13307 return OP_AMD64_ADD_MEMBASE_IMM;
13310 return OP_AMD64_SUB_MEMBASE_IMM;
13313 return OP_AMD64_AND_MEMBASE_IMM;
13316 return OP_AMD64_OR_MEMBASE_IMM;
13319 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode followed by a 1 byte store into a single
 * x86/amd64 SETcc-to-memory opcode (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE).  Only 1 byte stores qualify, since SETcc writes
 * a single byte.  NOTE(review): the case labels (presumably OP_CEQ-style
 * ops) are elided here -- confirm against the full switch.
 */
13329 op_to_op_store_membase (int store_opcode, int opcode)
13331 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13334 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13335 return OP_X86_SETEQ_MEMBASE;
13337 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13338 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   If OPCODE's first source operand comes straight from a memory load
 * (LOAD_OPCODE), return a fused opcode that reads the operand from memory
 * (push/compare with a membase operand) so the separate load can be
 * eliminated.  The load width must match the operation width; on amd64
 * the pointer-sized cases differ under __mono_ilp32__, where OP_LOAD_MEMBASE
 * is 32 bit wide.
 */
13346 op_to_op_src1_membase (int load_opcode, int opcode)
13349 /* FIXME: This has sign extension issues */
13351 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13352 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only pointer/32 bit wide loads can be folded on x86 */
13355 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13360 return OP_X86_PUSH_MEMBASE;
13361 case OP_COMPARE_IMM:
13362 case OP_ICOMPARE_IMM:
13363 return OP_X86_COMPARE_MEMBASE_IMM;
13366 return OP_X86_COMPARE_MEMBASE_REG;
13370 #ifdef TARGET_AMD64
13371 /* FIXME: This has sign extension issues */
13373 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13374 return OP_X86_COMPARE_MEMBASE8_IMM;
13379 #ifdef __mono_ilp32__
13380 if (load_opcode == OP_LOADI8_MEMBASE)
13382 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13384 return OP_X86_PUSH_MEMBASE;
13386 /* FIXME: This only works for 32 bit immediates
13387 case OP_COMPARE_IMM:
13388 case OP_LCOMPARE_IMM:
13389 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13390 return OP_AMD64_COMPARE_MEMBASE_IMM;
13392 case OP_ICOMPARE_IMM:
13393 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13394 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13398 #ifdef __mono_ilp32__
13399 if (load_opcode == OP_LOAD_MEMBASE)
13400 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13401 if (load_opcode == OP_LOADI8_MEMBASE)
13403 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13405 return OP_AMD64_COMPARE_MEMBASE_REG;
13408 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13409 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase, but for OPCODE's second source operand:
 * if it is produced by a matching-width memory load (LOAD_OPCODE), return
 * an x86/amd64 reg-op-membase fused opcode so the load can be eliminated.
 * On amd64 the 32 bit forms reuse the X86_* opcodes and the 64 bit forms
 * use AMD64_*; __mono_ilp32__ changes which class OP_LOAD_MEMBASE falls in.
 */
13418 op_to_op_src2_membase (int load_opcode, int opcode)
/* only pointer/32 bit wide loads can be folded on x86 */
13421 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13427 return OP_X86_COMPARE_REG_MEMBASE;
13429 return OP_X86_ADD_REG_MEMBASE;
13431 return OP_X86_SUB_REG_MEMBASE;
13433 return OP_X86_AND_REG_MEMBASE;
13435 return OP_X86_OR_REG_MEMBASE;
13437 return OP_X86_XOR_REG_MEMBASE;
13441 #ifdef TARGET_AMD64
13442 #ifdef __mono_ilp32__
13443 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13445 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit wide second operand */
13449 return OP_AMD64_ICOMPARE_REG_MEMBASE;
13451 return OP_X86_ADD_REG_MEMBASE;
13453 return OP_X86_SUB_REG_MEMBASE;
13455 return OP_X86_AND_REG_MEMBASE;
13457 return OP_X86_OR_REG_MEMBASE;
13459 return OP_X86_XOR_REG_MEMBASE;
13461 #ifdef __mono_ilp32__
13462 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13464 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit wide second operand */
13469 return OP_AMD64_COMPARE_REG_MEMBASE;
13471 return OP_AMD64_ADD_REG_MEMBASE;
13473 return OP_AMD64_SUB_REG_MEMBASE;
13475 return OP_AMD64_AND_REG_MEMBASE;
13477 return OP_AMD64_OR_REG_MEMBASE;
13479 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuse the conversion for opcodes that
 * this target emulates in software (long shifts on 32 bit registers
 * without native support, and mul/div/rem under the
 * MONO_ARCH_EMULATE_* defines); the elided case bodies presumably bail
 * out before the fallthrough to mono_op_to_op_imm -- confirm in the
 * full source.
 */
13488 mono_op_to_op_imm_noemul (int opcode)
13491 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13497 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13504 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
13509 return mono_op_to_op_imm (opcode);
13514 * mono_handle_global_vregs:
13516 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them, and conversely demote variables that turn out to be used in only
 * one bblock back to local vregs.  Finally compress cfg->varinfo/cfg->vars
 * so dead entries are removed.
13520 mono_handle_global_vregs (MonoCompile *cfg)
13522 gint32 *vreg_to_bb;
13523 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, block_num + 1 = seen in exactly one
 * bblock, -1 = seen in more than one bblock.
 * NOTE(review): the allocation uses sizeof (gint32*) for a gint32 array and
 * the "+ 1" is outside the multiplication -- over-allocates on 64 bit and
 * looks unintended; should presumably be sizeof (gint32) * (cfg->next_vreg + 1).
 */
13526 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13528 #ifdef MONO_ARCH_SIMD_INTRINSICS
13529 if (cfg->uses_simd_intrinsics)
13530 mono_simd_simplify_indirection (cfg);
13533 /* Find local vregs used in more than one bb */
13534 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13535 MonoInst *ins = bb->code;
13536 int block_num = bb->block_num;
13538 if (cfg->verbose_level > 2)
13539 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13542 for (; ins; ins = ins->next) {
13543 const char *spec = INS_INFO (ins->opcode);
13544 int regtype = 0, regindex;
13547 if (G_UNLIKELY (cfg->verbose_level > 2))
13548 mono_print_ins (ins);
/* CIL opcodes must have been lowered to machine IR by now */
13550 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
13552 for (regindex = 0; regindex < 4; regindex ++) {
13555 if (regindex == 0) {
13556 regtype = spec [MONO_INST_DEST];
13557 if (regtype == ' ')
13560 } else if (regindex == 1) {
13561 regtype = spec [MONO_INST_SRC1];
13562 if (regtype == ' ')
13565 } else if (regindex == 2) {
13566 regtype = spec [MONO_INST_SRC2];
13567 if (regtype == ' ')
13570 } else if (regindex == 3) {
13571 regtype = spec [MONO_INST_SRC3];
13572 if (regtype == ' ')
13577 #if SIZEOF_REGISTER == 4
13578 /* In the LLVM case, the long opcodes are not decomposed */
13579 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13581 * Since some instructions reference the original long vreg,
13582 * and some reference the two component vregs, it is quite hard
13583 * to determine when it needs to be global. So be conservative.
13585 if (!get_vreg_to_inst (cfg, vreg)) {
13586 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13588 if (cfg->verbose_level > 2)
13589 printf ("LONG VREG R%d made global.\n", vreg);
13593 * Make the component vregs volatile since the optimizations can
13594 * get confused otherwise.
13596 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13597 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13601 g_assert (vreg != -1);
13603 prev_bb = vreg_to_bb [vreg];
13604 if (prev_bb == 0) {
13605 /* 0 is a valid block num */
13606 vreg_to_bb [vreg] = block_num + 1;
13607 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are never made global */
13608 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13611 if (!get_vreg_to_inst (cfg, vreg)) {
13612 if (G_UNLIKELY (cfg->verbose_level > 2))
13613 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create a variable typed by the regtype ('i'/'l'/'f'/vtype) */
13617 if (vreg_is_ref (cfg, vreg))
13618 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13620 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13623 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13626 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13629 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13632 g_assert_not_reached ();
13636 /* Flag as having been used in more than one bb */
13637 vreg_to_bb [vreg] = -1;
13643 /* If a variable is used in only one bblock, convert it into a local vreg */
13644 for (i = 0; i < cfg->num_varinfo; i++) {
13645 MonoInst *var = cfg->varinfo [i];
13646 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13648 switch (var->type) {
13654 #if SIZEOF_REGISTER == 8
13657 #if !defined(TARGET_X86)
13658 /* Enabling this screws up the fp stack on x86 */
13661 if (mono_arch_is_soft_float ())
13664 /* Arguments are implicitly global */
13665 /* Putting R4 vars into registers doesn't work currently */
13666 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13667 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13669 * Make sure that the variable's liveness interval doesn't contain a call, since
13670 * that would cause the lvreg to be spilled, making the whole optimization
13673 /* This is too slow for JIT compilation */
13675 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13677 int def_index, call_index, ins_index;
13678 gboolean spilled = FALSE;
13683 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13684 const char *spec = INS_INFO (ins->opcode);
13686 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13687 def_index = ins_index;
/*
 * NOTE(review): the second clause duplicates the SRC1/sreg1 test -- it
 * looks like it was meant to check SRC2/sreg2, so uses through the second
 * source operand are currently missed here; confirm and fix.
 */
13689 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13690 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13691 if (call_index > def_index) {
13697 if (MONO_IS_CALL (ins))
13698 call_index = ins_index;
/* Demote: mark the variable dead and let the vreg stay local */
13708 if (G_UNLIKELY (cfg->verbose_level > 2))
13709 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13710 var->flags |= MONO_INST_IS_DEAD;
13711 cfg->vreg_to_inst [var->dreg] = NULL;
13718 * Compress the varinfo and vars tables so the liveness computation is faster and
13719 * takes up less space.
13722 for (i = 0; i < cfg->num_varinfo; ++i) {
13723 MonoInst *var = cfg->varinfo [i];
13724 if (pos < i && cfg->locals_start == i)
13725 cfg->locals_start = pos;
13726 if (!(var->flags & MONO_INST_IS_DEAD)) {
13728 cfg->varinfo [pos] = cfg->varinfo [i];
13729 cfg->varinfo [pos]->inst_c0 = pos;
13730 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13731 cfg->vars [pos].idx = pos;
13732 #if SIZEOF_REGISTER == 4
13733 if (cfg->varinfo [pos]->type == STACK_I8) {
13734 /* Modify the two component vars too */
13737 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13738 var1->inst_c0 = pos;
13739 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13740 var1->inst_c0 = pos;
13747 cfg->num_varinfo = pos;
13748 if (cfg->locals_start > cfg->num_varinfo)
13749 cfg->locals_start = cfg->num_varinfo;
13753 * mono_spill_global_vars:
13755 * Generate spill code for variables which are not allocated to registers,
13756 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13757 * code is generated which could be optimized by the local optimization passes.
13760 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13762 MonoBasicBlock *bb;
13764 int orig_next_vreg;
13765 guint32 *vreg_to_lvreg;
13767 guint32 i, lvregs_len;
13768 gboolean dest_has_lvreg = FALSE;
13769 guint32 stacktypes [128];
13770 MonoInst **live_range_start, **live_range_end;
13771 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13772 int *gsharedvt_vreg_to_idx = NULL;
13774 *need_local_opts = FALSE;
13776 memset (spec2, 0, sizeof (spec2));
13778 /* FIXME: Move this function to mini.c */
13779 stacktypes ['i'] = STACK_PTR;
13780 stacktypes ['l'] = STACK_I8;
13781 stacktypes ['f'] = STACK_R8;
13782 #ifdef MONO_ARCH_SIMD_INTRINSICS
13783 stacktypes ['x'] = STACK_VTYPE;
13786 #if SIZEOF_REGISTER == 4
13787 /* Create MonoInsts for longs */
13788 for (i = 0; i < cfg->num_varinfo; i++) {
13789 MonoInst *ins = cfg->varinfo [i];
13791 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13792 switch (ins->type) {
13797 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13800 g_assert (ins->opcode == OP_REGOFFSET);
13802 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13804 tree->opcode = OP_REGOFFSET;
13805 tree->inst_basereg = ins->inst_basereg;
13806 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13808 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13810 tree->opcode = OP_REGOFFSET;
13811 tree->inst_basereg = ins->inst_basereg;
13812 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13822 if (cfg->compute_gc_maps) {
13823 /* registers need liveness info even for !non refs */
13824 for (i = 0; i < cfg->num_varinfo; i++) {
13825 MonoInst *ins = cfg->varinfo [i];
13827 if (ins->opcode == OP_REGVAR)
13828 ins->flags |= MONO_INST_GC_TRACK;
13832 if (cfg->gsharedvt) {
13833 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13835 for (i = 0; i < cfg->num_varinfo; ++i) {
13836 MonoInst *ins = cfg->varinfo [i];
13839 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13840 if (i >= cfg->locals_start) {
13842 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13843 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13844 ins->opcode = OP_GSHAREDVT_LOCAL;
13845 ins->inst_imm = idx;
13848 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13849 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13855 /* FIXME: widening and truncation */
13858 * As an optimization, when a variable allocated to the stack is first loaded into
13859 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13860 * the variable again.
13862 orig_next_vreg = cfg->next_vreg;
13863 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13864 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13868 * These arrays contain the first and last instructions accessing a given
13870 * Since we emit bblocks in the same order we process them here, and we
13871 * don't split live ranges, these will precisely describe the live range of
13872 * the variable, i.e. the instruction range where a valid value can be found
13873 * in the variables location.
13874 * The live range is computed using the liveness info computed by the liveness pass.
13875 * We can't use vmv->range, since that is an abstract live range, and we need
13876 * one which is instruction precise.
13877 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13879 /* FIXME: Only do this if debugging info is requested */
13880 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13881 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13882 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13883 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13885 /* Add spill loads/stores */
13886 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13889 if (cfg->verbose_level > 2)
13890 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13892 /* Clear vreg_to_lvreg array */
13893 for (i = 0; i < lvregs_len; i++)
13894 vreg_to_lvreg [lvregs [i]] = 0;
13898 MONO_BB_FOR_EACH_INS (bb, ins) {
13899 const char *spec = INS_INFO (ins->opcode);
13900 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13901 gboolean store, no_lvreg;
13902 int sregs [MONO_MAX_SRC_REGS];
13904 if (G_UNLIKELY (cfg->verbose_level > 2))
13905 mono_print_ins (ins);
13907 if (ins->opcode == OP_NOP)
13911 * We handle LDADDR here as well, since it can only be decomposed
13912 * when variable addresses are known.
13914 if (ins->opcode == OP_LDADDR) {
13915 MonoInst *var = ins->inst_p0;
13917 if (var->opcode == OP_VTARG_ADDR) {
13918 /* Happens on SPARC/S390 where vtypes are passed by reference */
13919 MonoInst *vtaddr = var->inst_left;
13920 if (vtaddr->opcode == OP_REGVAR) {
13921 ins->opcode = OP_MOVE;
13922 ins->sreg1 = vtaddr->dreg;
13924 else if (var->inst_left->opcode == OP_REGOFFSET) {
13925 ins->opcode = OP_LOAD_MEMBASE;
13926 ins->inst_basereg = vtaddr->inst_basereg;
13927 ins->inst_offset = vtaddr->inst_offset;
13930 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13931 /* gsharedvt arg passed by ref */
13932 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13934 ins->opcode = OP_LOAD_MEMBASE;
13935 ins->inst_basereg = var->inst_basereg;
13936 ins->inst_offset = var->inst_offset;
13937 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13938 MonoInst *load, *load2, *load3;
13939 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13940 int reg1, reg2, reg3;
13941 MonoInst *info_var = cfg->gsharedvt_info_var;
13942 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13946 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13949 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13951 g_assert (info_var);
13952 g_assert (locals_var);
13954 /* Mark the instruction used to compute the locals var as used */
13955 cfg->gsharedvt_locals_var_ins = NULL;
13957 /* Load the offset */
13958 if (info_var->opcode == OP_REGOFFSET) {
13959 reg1 = alloc_ireg (cfg);
13960 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13961 } else if (info_var->opcode == OP_REGVAR) {
13963 reg1 = info_var->dreg;
13965 g_assert_not_reached ();
13967 reg2 = alloc_ireg (cfg);
13968 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13969 /* Load the locals area address */
13970 reg3 = alloc_ireg (cfg);
13971 if (locals_var->opcode == OP_REGOFFSET) {
13972 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13973 } else if (locals_var->opcode == OP_REGVAR) {
13974 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13976 g_assert_not_reached ();
13978 /* Compute the address */
13979 ins->opcode = OP_PADD;
13983 mono_bblock_insert_before_ins (bb, ins, load3);
13984 mono_bblock_insert_before_ins (bb, load3, load2);
13986 mono_bblock_insert_before_ins (bb, load2, load);
13988 g_assert (var->opcode == OP_REGOFFSET);
13990 ins->opcode = OP_ADD_IMM;
13991 ins->sreg1 = var->inst_basereg;
13992 ins->inst_imm = var->inst_offset;
13995 *need_local_opts = TRUE;
13996 spec = INS_INFO (ins->opcode);
13999 if (ins->opcode < MONO_CEE_LAST) {
14000 mono_print_ins (ins);
14001 g_assert_not_reached ();
14005 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14009 if (MONO_IS_STORE_MEMBASE (ins)) {
14010 tmp_reg = ins->dreg;
14011 ins->dreg = ins->sreg2;
14012 ins->sreg2 = tmp_reg;
14015 spec2 [MONO_INST_DEST] = ' ';
14016 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14017 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14018 spec2 [MONO_INST_SRC3] = ' ';
14020 } else if (MONO_IS_STORE_MEMINDEX (ins))
14021 g_assert_not_reached ();
14026 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14027 printf ("\t %.3s %d", spec, ins->dreg);
14028 num_sregs = mono_inst_get_src_registers (ins, sregs);
14029 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14030 printf (" %d", sregs [srcindex]);
14037 regtype = spec [MONO_INST_DEST];
14038 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14041 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14042 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14043 MonoInst *store_ins;
14045 MonoInst *def_ins = ins;
14046 int dreg = ins->dreg; /* The original vreg */
14048 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14050 if (var->opcode == OP_REGVAR) {
14051 ins->dreg = var->dreg;
14052 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14054 * Instead of emitting a load+store, use a _membase opcode.
14056 g_assert (var->opcode == OP_REGOFFSET);
14057 if (ins->opcode == OP_MOVE) {
14061 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14062 ins->inst_basereg = var->inst_basereg;
14063 ins->inst_offset = var->inst_offset;
14066 spec = INS_INFO (ins->opcode);
14070 g_assert (var->opcode == OP_REGOFFSET);
14072 prev_dreg = ins->dreg;
14074 /* Invalidate any previous lvreg for this vreg */
14075 vreg_to_lvreg [ins->dreg] = 0;
14079 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
14081 store_opcode = OP_STOREI8_MEMBASE_REG;
14084 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14086 #if SIZEOF_REGISTER != 8
14087 if (regtype == 'l') {
14088 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14089 mono_bblock_insert_after_ins (bb, ins, store_ins);
14090 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14091 mono_bblock_insert_after_ins (bb, ins, store_ins);
14092 def_ins = store_ins;
14097 g_assert (store_opcode != OP_STOREV_MEMBASE);
14099 /* Try to fuse the store into the instruction itself */
14100 /* FIXME: Add more instructions */
14101 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14102 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14103 ins->inst_imm = ins->inst_c0;
14104 ins->inst_destbasereg = var->inst_basereg;
14105 ins->inst_offset = var->inst_offset;
14106 spec = INS_INFO (ins->opcode);
14107 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
14108 ins->opcode = store_opcode;
14109 ins->inst_destbasereg = var->inst_basereg;
14110 ins->inst_offset = var->inst_offset;
14114 tmp_reg = ins->dreg;
14115 ins->dreg = ins->sreg2;
14116 ins->sreg2 = tmp_reg;
14119 spec2 [MONO_INST_DEST] = ' ';
14120 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14121 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14122 spec2 [MONO_INST_SRC3] = ' ';
14124 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14125 // FIXME: The backends expect the base reg to be in inst_basereg
14126 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14128 ins->inst_basereg = var->inst_basereg;
14129 ins->inst_offset = var->inst_offset;
14130 spec = INS_INFO (ins->opcode);
14132 /* printf ("INS: "); mono_print_ins (ins); */
14133 /* Create a store instruction */
14134 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14136 /* Insert it after the instruction */
14137 mono_bblock_insert_after_ins (bb, ins, store_ins);
14139 def_ins = store_ins;
14142 * We can't assign ins->dreg to var->dreg here, since the
14143 * sregs could use it. So set a flag, and do it after
14146 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14147 dest_has_lvreg = TRUE;
14152 if (def_ins && !live_range_start [dreg]) {
14153 live_range_start [dreg] = def_ins;
14154 live_range_start_bb [dreg] = bb;
14157 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14160 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14161 tmp->inst_c1 = dreg;
14162 mono_bblock_insert_after_ins (bb, def_ins, tmp);
14169 num_sregs = mono_inst_get_src_registers (ins, sregs);
14170 for (srcindex = 0; srcindex < 3; ++srcindex) {
14171 regtype = spec [MONO_INST_SRC1 + srcindex];
14172 sreg = sregs [srcindex];
14174 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14175 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14176 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14177 MonoInst *use_ins = ins;
14178 MonoInst *load_ins;
14179 guint32 load_opcode;
14181 if (var->opcode == OP_REGVAR) {
14182 sregs [srcindex] = var->dreg;
14183 //mono_inst_set_src_registers (ins, sregs);
14184 live_range_end [sreg] = use_ins;
14185 live_range_end_bb [sreg] = bb;
14187 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14190 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14191 /* var->dreg is a hreg */
14192 tmp->inst_c1 = sreg;
14193 mono_bblock_insert_after_ins (bb, ins, tmp);
14199 g_assert (var->opcode == OP_REGOFFSET);
14201 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14203 g_assert (load_opcode != OP_LOADV_MEMBASE);
14205 if (vreg_to_lvreg [sreg]) {
14206 g_assert (vreg_to_lvreg [sreg] != -1);
14208 /* The variable is already loaded to an lvreg */
14209 if (G_UNLIKELY (cfg->verbose_level > 2))
14210 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14211 sregs [srcindex] = vreg_to_lvreg [sreg];
14212 //mono_inst_set_src_registers (ins, sregs);
14216 /* Try to fuse the load into the instruction */
14217 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14218 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14219 sregs [0] = var->inst_basereg;
14220 //mono_inst_set_src_registers (ins, sregs);
14221 ins->inst_offset = var->inst_offset;
14222 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14223 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14224 sregs [1] = var->inst_basereg;
14225 //mono_inst_set_src_registers (ins, sregs);
14226 ins->inst_offset = var->inst_offset;
14228 if (MONO_IS_REAL_MOVE (ins)) {
14229 ins->opcode = OP_NOP;
14232 //printf ("%d ", srcindex); mono_print_ins (ins);
14234 sreg = alloc_dreg (cfg, stacktypes [regtype]);
14236 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14237 if (var->dreg == prev_dreg) {
14239 * sreg refers to the value loaded by the load
14240 * emitted below, but we need to use ins->dreg
14241 * since it refers to the store emitted earlier.
14245 g_assert (sreg != -1);
14246 vreg_to_lvreg [var->dreg] = sreg;
14247 g_assert (lvregs_len < 1024);
14248 lvregs [lvregs_len ++] = var->dreg;
14252 sregs [srcindex] = sreg;
14253 //mono_inst_set_src_registers (ins, sregs);
14255 #if SIZEOF_REGISTER != 8
14256 if (regtype == 'l') {
14257 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14258 mono_bblock_insert_before_ins (bb, ins, load_ins);
14259 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14260 mono_bblock_insert_before_ins (bb, ins, load_ins);
14261 use_ins = load_ins;
14266 #if SIZEOF_REGISTER == 4
14267 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14269 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14270 mono_bblock_insert_before_ins (bb, ins, load_ins);
14271 use_ins = load_ins;
14275 if (var->dreg < orig_next_vreg) {
14276 live_range_end [var->dreg] = use_ins;
14277 live_range_end_bb [var->dreg] = bb;
14280 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14283 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14284 tmp->inst_c1 = var->dreg;
14285 mono_bblock_insert_after_ins (bb, ins, tmp);
14289 mono_inst_set_src_registers (ins, sregs);
14291 if (dest_has_lvreg) {
14292 g_assert (ins->dreg != -1);
14293 vreg_to_lvreg [prev_dreg] = ins->dreg;
14294 g_assert (lvregs_len < 1024);
14295 lvregs [lvregs_len ++] = prev_dreg;
14296 dest_has_lvreg = FALSE;
14300 tmp_reg = ins->dreg;
14301 ins->dreg = ins->sreg2;
14302 ins->sreg2 = tmp_reg;
14305 if (MONO_IS_CALL (ins)) {
14306 /* Clear vreg_to_lvreg array */
14307 for (i = 0; i < lvregs_len; i++)
14308 vreg_to_lvreg [lvregs [i]] = 0;
14310 } else if (ins->opcode == OP_NOP) {
14312 MONO_INST_NULLIFY_SREGS (ins);
14315 if (cfg->verbose_level > 2)
14316 mono_print_ins_index (1, ins);
14319 /* Extend the live range based on the liveness info */
14320 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14321 for (i = 0; i < cfg->num_varinfo; i ++) {
14322 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14324 if (vreg_is_volatile (cfg, vi->vreg))
14325 /* The liveness info is incomplete */
14328 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14329 /* Live from at least the first ins of this bb */
14330 live_range_start [vi->vreg] = bb->code;
14331 live_range_start_bb [vi->vreg] = bb;
14334 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14335 /* Live at least until the last ins of this bb */
14336 live_range_end [vi->vreg] = bb->last_ins;
14337 live_range_end_bb [vi->vreg] = bb;
14343 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14345 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14346 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14348 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14349 for (i = 0; i < cfg->num_varinfo; ++i) {
14350 int vreg = MONO_VARINFO (cfg, i)->vreg;
14353 if (live_range_start [vreg]) {
14354 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14356 ins->inst_c1 = vreg;
14357 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14359 if (live_range_end [vreg]) {
14360 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14362 ins->inst_c1 = vreg;
14363 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14364 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14366 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14372 if (cfg->gsharedvt_locals_var_ins) {
14373 /* Nullify if unused */
14374 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14375 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14378 g_free (live_range_start);
14379 g_free (live_range_end);
14380 g_free (live_range_start_bb);
14381 g_free (live_range_end_bb);
14386 * - use 'iadd' instead of 'int_add'
14387 * - handling ovf opcodes: decompose in method_to_ir.
14388 * - unify iregs/fregs
14389 * -> partly done, the missing parts are:
14390 * - a more complete unification would involve unifying the hregs as well, so
14391 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14392 * would no longer map to the machine hregs, so the code generators would need to
14393 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14394 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14395 * fp/non-fp branches speeds it up by about 15%.
14396 * - use sext/zext opcodes instead of shifts
14398 * - get rid of TEMPLOADs if possible and use vregs instead
14399 * - clean up usage of OP_P/OP_ opcodes
14400 * - cleanup usage of DUMMY_USE
14401 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14403 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14404 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14405 * - make sure handle_stack_args () is called before the branch is emitted
14406 * - when the new IR is done, get rid of all unused stuff
14407 * - COMPARE/BEQ as separate instructions or unify them ?
14408 * - keeping them separate allows specialized compare instructions like
14409 * compare_imm, compare_membase
14410 * - most back ends unify fp compare+branch, fp compare+ceq
14411 * - integrate mono_save_args into inline_method
14412 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
14413 * - handle long shift opts on 32 bit platforms somehow: they require
14414 * 3 sregs (2 for arg1 and 1 for arg2)
14415 * - make byref a 'normal' type.
14416 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14417 * variable if needed.
14418 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14419 * like inline_method.
14420 * - remove inlining restrictions
14421 * - fix LNEG and enable cfold of INEG
14422 * - generalize x86 optimizations like ldelema as a peephole optimization
14423 * - add store_mem_imm for amd64
14424 * - optimize the loading of the interruption flag in the managed->native wrappers
14425 * - avoid special handling of OP_NOP in passes
14426 * - move code inserting instructions into one function/macro.
14427 * - try a coalescing phase after liveness analysis
14428 * - add float -> vreg conversion + local optimizations on !x86
14429 * - figure out how to handle decomposed branches during optimizations, ie.
14430 * compare+branch, op_jump_table+op_br etc.
14431 * - promote RuntimeXHandles to vregs
14432 * - vtype cleanups:
14433 * - add a NEW_VARLOADA_VREG macro
14434 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14435 * accessing vtype fields.
14436 * - get rid of I8CONST on 64 bit platforms
14437 * - dealing with the increase in code size due to branches created during opcode
14439 * - use extended basic blocks
14440 * - all parts of the JIT
14441 * - handle_global_vregs () && local regalloc
14442 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14443 * - sources of increase in code size:
14446 * - isinst and castclass
14447 * - lvregs not allocated to global registers even if used multiple times
14448 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14450 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14451 * - add all micro optimizations from the old JIT
14452 * - put tree optimizations into the deadce pass
14453 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14454 * specific function.
14455 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14456 * fcompare + branchCC.
14457 * - create a helper function for allocating a stack slot, taking into account
14458 * MONO_CFG_HAS_SPILLUP.
14460 * - merge the ia64 switch changes.
14461 * - optimize mono_regstate2_alloc_int/float.
14462 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14463 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14464 * parts of the tree could be separated by other instructions, killing the tree
14465 * arguments, or stores killing loads etc. Also, should we fold loads into other
14466 * instructions if the result of the load is used multiple times ?
14467 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14468 * - LAST MERGE: 108395.
14469 * - when returning vtypes in registers, generate IR and append it to the end of the
14470 * last bb instead of doing it in the epilog.
14471 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14479 - When to decompose opcodes:
14480 - earlier: this makes some optimizations hard to implement, since the low level IR
14481 no longer contains the necessary information. But it is easier to do.
14482 - later: harder to implement, enables more optimizations.
14483 - Branches inside bblocks:
14484 - created when decomposing complex opcodes.
14485 - branches to another bblock: harmless, but not tracked by the branch
14486 optimizations, so need to branch to a label at the start of the bblock.
14487 - branches to inside the same bblock: very problematic, trips up the local
14488 reg allocator. Can be fixed by splitting the current bblock, but that is a
14489 complex operation, since some local vregs can become global vregs etc.
14490 - Local/global vregs:
14491 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14492 local register allocator.
14493 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14494 structure, created by mono_create_var (). Assigned to hregs or the stack by
14495 the global register allocator.
14496 - When to do optimizations like alu->alu_imm:
14497 - earlier -> saves work later on since the IR will be smaller/simpler
14498 - later -> can work on more instructions
14499 - Handling of valuetypes:
14500 - When a vtype is pushed on the stack, a new temporary is created, an
14501 instruction computing its address (LDADDR) is emitted and pushed on
14502 the stack. Need to optimize cases when the vtype is used immediately as in
14503 argument passing, stloc etc.
14504 - Instead of the to_end stuff in the old JIT, simply call the function handling
14505 the values on the stack before emitting the last instruction of the bb.
14508 #endif /* DISABLE_JIT */