2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
159 * Instruction metadata
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
199 #define MONO_INIT_VARINFO(vi,id) do { \
200 (vi)->range.first_use.pos.bid = 0xffff; \
/* Public wrappers around the per-cfg virtual-register allocators
 * (alloc_ireg/lreg/freg/preg/dreg): each returns a fresh vreg of the
 * requested kind for CFG. */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
/* Allocate a vreg sized/typed according to the eval-stack type STACK_TYPE. */
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification (ref / managed pointer / plain int)
 * to the newly allocated register. */
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_replace_type (type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug dump of one basic block: MSG prefix, block number, the lists of
 *   predecessor (IN) and successor (OUT) blocks with their dfn numbers,
 *   followed by every instruction in the block.
 */
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Dump every instruction in the block in order. */
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *   Pre-create the icall signatures used for trampoline/helper calls and
 *   cache them in the helper_sig_* globals declared above. The strings
 *   are "<ret> <args...>" as understood by mono_create_icall_signature ().
 */
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
368 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/*
 * break_on_unverified:
 *   Debugging hook called whenever invalid/unverifiable IL is detected
 *   (see the LOAD_ERROR and UNVERIFIED macros). When the
 *   break-on-unverified debug option is enabled it traps into the
 *   debugger at that point.
 *   NOTE(review): the breakpoint statement itself is not visible in this
 *   chunk — confirm against the full source.
 */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 *   Fail the compilation with MONO_EXCEPTION_METHOD_ACCESS: METHOD is not
 *   allowed to call CIL_METHOD. Builds a human-readable message from the
 *   two full method names; the message is owned by CFG, the temporary
 *   name strings are freed here.
 */
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
384 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
/*
 * field_access_failure:
 *   Fail the compilation with MONO_EXCEPTION_FIELD_ACCESS: METHOD is not
 *   allowed to access FIELD. Mirrors method_access_failure () above; the
 *   formatted message is owned by CFG, the temporaries are freed here.
 */
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
395 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
/*
 * inline_failure:
 *   Abort inlining of the current callee: optionally log MSG (verbose
 *   level >= 2) and set MONO_EXCEPTION_INLINE_FAILED on CFG so the JIT
 *   restarts compilation without inlining. Invoked through the
 *   INLINE_FAILURE macro above.
 */
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 *   Record that gsharedvt compilation cannot handle OPCODE in the current
 *   method. FILE/LINE are the call site (__FILE__/__LINE__ forwarded by
 *   the GSHAREDVT_FAILURE macro). Formats the message first so it can
 *   also be printed when verbose, then sets
 *   MONO_EXCEPTION_GENERIC_SHARING_FAILED so the method is compiled as a
 *   normal instantiation instead.
 */
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
427 * foo<T> (int i) { ldarg.0; box T; }
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 #if SIZEOF_REGISTER == 8
463 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
464 /* FIXME: Need to add many more cases */ \
465 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
467 int dr = alloc_preg (cfg); \
468 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
469 (ins)->sreg2 = widen->dreg; \
473 #define ADD_WIDEN_OP(ins, arg1, arg2)
476 #define ADD_BINOP(op) do { \
477 MONO_INST_NEW (cfg, ins, (op)); \
479 ins->sreg1 = sp [0]->dreg; \
480 ins->sreg2 = sp [1]->dreg; \
481 type_from_op (ins, sp [0], sp [1]); \
483 /* Have to insert a widening op */ \
484 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
485 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
486 MONO_ADD_INS ((cfg)->cbb, (ins)); \
487 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
490 #define ADD_UNOP(op) do { \
491 MONO_INST_NEW (cfg, ins, (op)); \
493 ins->sreg1 = sp [0]->dreg; \
494 type_from_op (ins, sp [0], NULL); \
496 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
497 MONO_ADD_INS ((cfg)->cbb, (ins)); \
498 *sp++ = mono_decompose_opcode (cfg, ins); \
501 #define ADD_BINCOND(next_block) do { \
504 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
505 cmp->sreg1 = sp [0]->dreg; \
506 cmp->sreg2 = sp [1]->dreg; \
507 type_from_op (cmp, sp [0], sp [1]); \
509 type_from_op (ins, sp [0], sp [1]); \
510 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
511 GET_BBLOCK (cfg, tblock, target); \
512 link_bblock (cfg, bblock, tblock); \
513 ins->inst_true_bb = tblock; \
514 if ((next_block)) { \
515 link_bblock (cfg, bblock, (next_block)); \
516 ins->inst_false_bb = (next_block); \
517 start_new_bblock = 1; \
519 GET_BBLOCK (cfg, tblock, ip); \
520 link_bblock (cfg, bblock, tblock); \
521 ins->inst_false_bb = tblock; \
522 start_new_bblock = 2; \
524 if (sp != stack_start) { \
525 handle_stack_args (cfg, stack_start, sp - stack_start); \
526 CHECK_UNVERIFIABLE (cfg); \
528 MONO_ADD_INS (bblock, cmp); \
529 MONO_ADD_INS (bblock, ins); \
533 * link_bblock: Links two basic blocks
535 * links two basic blocks in the control flow graph, the 'from'
536 * argument is the starting block and the 'to' argument is the block
537 * the control flow proceeds to after 'from'.
540 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 MonoBasicBlock **newa;
/* Optional debug tracing of the edge being added; entry/exit blocks have
 * no cil_code. */
546 if (from->cil_code) {
548 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
550 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
553 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
555 printf ("edge from entry to exit\n");
/* Skip if the edge already exists in FROM's successor list. */
560 for (i = 0; i < from->out_count; ++i) {
561 if (to == from->out_bb [i]) {
/* Grow FROM's out_bb array by one (mempool allocation, old copy
 * abandoned) and append TO. */
567 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
568 for (i = 0; i < from->out_count; ++i) {
569 newa [i] = from->out_bb [i];
/* Symmetric update of TO's predecessor (in_bb) list. */
577 for (i = 0; i < to->in_count; ++i) {
578 if (from == to->in_bb [i]) {
584 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
585 for (i = 0; i < to->in_count; ++i) {
586 newa [i] = to->in_bb [i];
/* Public (exported) wrapper around the static link_bblock () above. */
595 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
597 link_bblock (cfg, from, to);
601 * mono_find_block_region:
603 * We mark each basic block with a region ID. We use that to avoid BB
604 * optimizations when blocks are in different regions.
607 * A region token that encodes where this region is, and information
608 * about the clause owner for this block.
610 * The region encodes the try/catch/filter clause that owns this block
611 * as well as the type. -1 is a special value that represents a block
612 * that is in none of try/catch/filter.
615 mono_find_block_region (MonoCompile *cfg, int offset)
617 MonoMethodHeader *header = cfg->header;
618 MonoExceptionClause *clause;
/* Scan the method's EH clauses; the first clause whose filter, handler or
 * protected range contains OFFSET determines the region. The returned
 * token is ((clause_index + 1) << 8) | MONO_REGION_* | clause->flags. */
621 for (i = 0; i < header->num_clauses; ++i) {
622 clause = &header->clauses [i];
/* Inside the filter expression of a filter clause (between
 * filter_offset and handler_offset). */
623 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
624 (offset < (clause->handler_offset)))
625 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler body: distinguish finally/fault/catch. */
627 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
628 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
629 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
630 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
631 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
633 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range itself. */
636 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
637 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect the EH clauses of kind TYPE (e.g. finally) whose protected
 *   range contains IP but not TARGET — i.e. the handlers that must be
 *   executed when control transfers from IP to TARGET (leave/branch out
 *   of a try block). Results are appended to a GList.
 */
644 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
646 MonoMethodHeader *header = cfg->header;
647 MonoExceptionClause *clause;
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
/* Branch leaves this clause's protected range... */
653 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
654 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
/* ...and the clause is of the requested kind. */
655 if (clause->flags == type)
656 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *   Return the variable used to save the stack pointer for EH region
 *   REGION, creating it on first use and caching it in cfg->spvars.
 */
663 mono_create_spvar_for_region (MonoCompile *cfg, int region)
667 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
671 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
672 /* prevent it from being register allocated */
673 var->flags |= MONO_INST_VOLATILE;
675 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for the
 * handler starting at OFFSET; NULL if none has been created yet. */
679 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
681 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *   Return the variable holding the exception object for the handler at
 *   OFFSET, creating it on first use and caching it in cfg->exvars.
 */
685 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
689 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
693 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
694 /* prevent it from being register allocated */
695 var->flags |= MONO_INST_VOLATILE;
697 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
703 * Returns the type used in the eval stack when @type is loaded.
704 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
707 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
711 type = mini_replace_type (type);
712 inst->klass = klass = mono_class_from_mono_type (type);
714 inst->type = STACK_MP;
719 switch (type->type) {
721 inst->type = STACK_INV;
725 case MONO_TYPE_BOOLEAN:
731 inst->type = STACK_I4;
736 case MONO_TYPE_FNPTR:
737 inst->type = STACK_PTR;
739 case MONO_TYPE_CLASS:
740 case MONO_TYPE_STRING:
741 case MONO_TYPE_OBJECT:
742 case MONO_TYPE_SZARRAY:
743 case MONO_TYPE_ARRAY:
744 inst->type = STACK_OBJ;
748 inst->type = STACK_I8;
752 inst->type = STACK_R8;
754 case MONO_TYPE_VALUETYPE:
755 if (type->data.klass->enumtype) {
756 type = mono_class_enum_basetype (type->data.klass);
760 inst->type = STACK_VTYPE;
763 case MONO_TYPE_TYPEDBYREF:
764 inst->klass = mono_defaults.typed_reference_class;
765 inst->type = STACK_VTYPE;
767 case MONO_TYPE_GENERICINST:
768 type = &type->data.generic_class->container_class->byval_arg;
772 g_assert (cfg->generic_sharing_context);
773 if (mini_is_gsharedvt_type (cfg, type)) {
774 g_assert (cfg->gsharedvt);
775 inst->type = STACK_VTYPE;
777 inst->type = STACK_OBJ;
781 g_error ("unknown type 0x%02x in eval stack type", type->type);
786 * The following tables are used to quickly validate the IL code in type_from_op ().
789 bin_num_table [STACK_MAX] [STACK_MAX] = {
790 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
793 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
794 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
795 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
796 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
797 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
802 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
805 /* reduce the size of this table */
807 bin_int_table [STACK_MAX] [STACK_MAX] = {
808 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
809 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
819 bin_comp_table [STACK_MAX] [STACK_MAX] = {
820 /* Inv i L p F & O vt */
822 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
823 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
824 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
825 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
826 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
827 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
828 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
831 /* reduce the size of this table */
833 shift_table [STACK_MAX] [STACK_MAX] = {
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
845 * Tables to map from the non-specific opcode to the matching
846 * type-specific opcode.
848 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
850 binops_op_map [STACK_MAX] = {
851 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
854 /* handles from CEE_NEG to CEE_CONV_U8 */
856 unops_op_map [STACK_MAX] = {
857 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
860 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
862 ovfops_op_map [STACK_MAX] = {
863 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
866 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
868 ovf2ops_op_map [STACK_MAX] = {
869 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
872 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
874 ovf3ops_op_map [STACK_MAX] = {
875 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
878 /* handles from CEE_BEQ to CEE_BLT_UN */
880 beqops_op_map [STACK_MAX] = {
881 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
884 /* handles from CEE_CEQ to CEE_CLT_UN */
886 ceqops_op_map [STACK_MAX] = {
887 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
891 * Sets ins->type (the type on the eval stack) according to the
892 * type of the opcode and the arguments to it.
893 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
895 * FIXME: this function sets ins->type unconditionally in some cases, but
896 * it should set it to invalid for some types (a conv.x on an object)
899 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
901 switch (ins->opcode) {
908 /* FIXME: check unverifiable args for STACK_MP */
909 ins->type = bin_num_table [src1->type] [src2->type];
910 ins->opcode += binops_op_map [ins->type];
917 ins->type = bin_int_table [src1->type] [src2->type];
918 ins->opcode += binops_op_map [ins->type];
923 ins->type = shift_table [src1->type] [src2->type];
924 ins->opcode += binops_op_map [ins->type];
929 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
930 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
931 ins->opcode = OP_LCOMPARE;
932 else if (src1->type == STACK_R8)
933 ins->opcode = OP_FCOMPARE;
935 ins->opcode = OP_ICOMPARE;
937 case OP_ICOMPARE_IMM:
938 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
939 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
940 ins->opcode = OP_LCOMPARE_IMM;
952 ins->opcode += beqops_op_map [src1->type];
955 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
956 ins->opcode += ceqops_op_map [src1->type];
962 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
963 ins->opcode += ceqops_op_map [src1->type];
967 ins->type = neg_table [src1->type];
968 ins->opcode += unops_op_map [ins->type];
971 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
972 ins->type = src1->type;
974 ins->type = STACK_INV;
975 ins->opcode += unops_op_map [ins->type];
981 ins->type = STACK_I4;
982 ins->opcode += unops_op_map [src1->type];
985 ins->type = STACK_R8;
986 switch (src1->type) {
989 ins->opcode = OP_ICONV_TO_R_UN;
992 ins->opcode = OP_LCONV_TO_R_UN;
996 case CEE_CONV_OVF_I1:
997 case CEE_CONV_OVF_U1:
998 case CEE_CONV_OVF_I2:
999 case CEE_CONV_OVF_U2:
1000 case CEE_CONV_OVF_I4:
1001 case CEE_CONV_OVF_U4:
1002 ins->type = STACK_I4;
1003 ins->opcode += ovf3ops_op_map [src1->type];
1005 case CEE_CONV_OVF_I_UN:
1006 case CEE_CONV_OVF_U_UN:
1007 ins->type = STACK_PTR;
1008 ins->opcode += ovf2ops_op_map [src1->type];
1010 case CEE_CONV_OVF_I1_UN:
1011 case CEE_CONV_OVF_I2_UN:
1012 case CEE_CONV_OVF_I4_UN:
1013 case CEE_CONV_OVF_U1_UN:
1014 case CEE_CONV_OVF_U2_UN:
1015 case CEE_CONV_OVF_U4_UN:
1016 ins->type = STACK_I4;
1017 ins->opcode += ovf2ops_op_map [src1->type];
1020 ins->type = STACK_PTR;
1021 switch (src1->type) {
1023 ins->opcode = OP_ICONV_TO_U;
1027 #if SIZEOF_VOID_P == 8
1028 ins->opcode = OP_LCONV_TO_U;
1030 ins->opcode = OP_MOVE;
1034 ins->opcode = OP_LCONV_TO_U;
1037 ins->opcode = OP_FCONV_TO_U;
1043 ins->type = STACK_I8;
1044 ins->opcode += unops_op_map [src1->type];
1046 case CEE_CONV_OVF_I8:
1047 case CEE_CONV_OVF_U8:
1048 ins->type = STACK_I8;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_U8_UN:
1052 case CEE_CONV_OVF_I8_UN:
1053 ins->type = STACK_I8;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1058 ins->type = STACK_R8;
1059 ins->opcode += unops_op_map [src1->type];
1062 ins->type = STACK_R8;
1066 ins->type = STACK_I4;
1067 ins->opcode += ovfops_op_map [src1->type];
1070 case CEE_CONV_OVF_I:
1071 case CEE_CONV_OVF_U:
1072 ins->type = STACK_PTR;
1073 ins->opcode += ovfops_op_map [src1->type];
1076 case CEE_ADD_OVF_UN:
1078 case CEE_MUL_OVF_UN:
1080 case CEE_SUB_OVF_UN:
1081 ins->type = bin_num_table [src1->type] [src2->type];
1082 ins->opcode += ovfops_op_map [src1->type];
1083 if (ins->type == STACK_R8)
1084 ins->type = STACK_INV;
1086 case OP_LOAD_MEMBASE:
1087 ins->type = STACK_PTR;
1089 case OP_LOADI1_MEMBASE:
1090 case OP_LOADU1_MEMBASE:
1091 case OP_LOADI2_MEMBASE:
1092 case OP_LOADU2_MEMBASE:
1093 case OP_LOADI4_MEMBASE:
1094 case OP_LOADU4_MEMBASE:
1095 ins->type = STACK_PTR;
1097 case OP_LOADI8_MEMBASE:
1098 ins->type = STACK_I8;
1100 case OP_LOADR4_MEMBASE:
1101 case OP_LOADR8_MEMBASE:
1102 ins->type = STACK_R8;
1105 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1109 if (ins->type == STACK_MP)
1110 ins->klass = mono_defaults.object_class;
1115 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1121 param_table [STACK_MAX] [STACK_MAX] = {
1126 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1130 switch (args->type) {
1140 for (i = 0; i < sig->param_count; ++i) {
1141 switch (args [i].type) {
1145 if (!sig->params [i]->byref)
1149 if (sig->params [i]->byref)
1151 switch (sig->params [i]->type) {
1152 case MONO_TYPE_CLASS:
1153 case MONO_TYPE_STRING:
1154 case MONO_TYPE_OBJECT:
1155 case MONO_TYPE_SZARRAY:
1156 case MONO_TYPE_ARRAY:
1163 if (sig->params [i]->byref)
1165 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1174 /*if (!param_table [args [i].type] [sig->params [i]->type])
1182 * When we need a pointer to the current domain many times in a method, we
1183 * call mono_domain_get() once and we store the result in a local variable.
1184 * This function returns the variable that represents the MonoDomain*.
1186 inline static MonoInst *
1187 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the local caching the MonoDomain* on first request. */
1189 if (!cfg->domainvar)
1190 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1191 return cfg->domainvar;
1195 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 *   Return the variable holding the GOT address, lazily created. Only
 *   meaningful when the architecture needs an explicit GOT var
 *   (MONO_ARCH_NEED_GOT_VAR) and we are compiling AOT code.
 */
1199 mono_get_got_var (MonoCompile *cfg)
1201 #ifdef MONO_ARCH_NEED_GOT_VAR
1202 if (!cfg->compile_aot)
1204 if (!cfg->got_var) {
1205 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1207 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *   Return the variable holding the runtime generic context (rgctx),
 *   lazily created. Only valid for generic-shared methods (asserted via
 *   cfg->generic_sharing_context).
 */
1214 mono_get_vtable_var (MonoCompile *cfg)
1216 g_assert (cfg->generic_sharing_context);
1218 if (!cfg->rgctx_var) {
1219 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1220 /* force the var to be stack allocated */
1221 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1224 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *   Map an instruction's eval-stack type (STACK_*) back to a MonoType.
 *   For objects/vtypes the result depends on ins->klass.
 */
1228 type_from_stack_type (MonoInst *ins) {
1229 switch (ins->type) {
1230 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1231 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1232 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1233 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the klass's byref "this" type. */
1235 return &ins->klass->this_arg;
1236 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1237 case STACK_VTYPE: return &ins->klass->byval_arg;
1239 g_error ("stack type %d to monotype not handled\n", ins->type);
1244 static G_GNUC_UNUSED int
1245 type_to_stack_type (MonoType *t)
1247 t = mono_type_get_underlying_type (t);
1251 case MONO_TYPE_BOOLEAN:
1254 case MONO_TYPE_CHAR:
1261 case MONO_TYPE_FNPTR:
1263 case MONO_TYPE_CLASS:
1264 case MONO_TYPE_STRING:
1265 case MONO_TYPE_OBJECT:
1266 case MONO_TYPE_SZARRAY:
1267 case MONO_TYPE_ARRAY:
1275 case MONO_TYPE_VALUETYPE:
1276 case MONO_TYPE_TYPEDBYREF:
1278 case MONO_TYPE_GENERICINST:
1279 if (mono_type_generic_inst_is_valuetype (t))
1285 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Return the element class accessed by a ldelem.*/
/* or stelem.* CIL opcode (the case labels for most opcodes are not
 * visible in this chunk). */
1292 array_access_to_klass (int opcode)
1296 return mono_defaults.byte_class;
1298 return mono_defaults.uint16_class;
1301 return mono_defaults.int_class;
1304 return mono_defaults.sbyte_class;
1307 return mono_defaults.int16_class;
1310 return mono_defaults.int32_class;
1312 return mono_defaults.uint32_class;
1315 return mono_defaults.int64_class;
1318 return mono_defaults.single_class;
1321 return mono_defaults.double_class;
1322 case CEE_LDELEM_REF:
1323 case CEE_STELEM_REF:
1324 return mono_defaults.object_class;
1326 g_assert_not_reached ();
1332 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a variable to hold the value at eval-stack SLOT across a basic
 *   block boundary. Variables are shared (cached in cfg->intvars, keyed
 *   by slot and stack type) when the slot is within the declared
 *   max_stack; deeper slots (from inlining) always get a fresh variable.
 */
1335 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1340 /* inlining can result in deeper stacks */
1341 if (slot >= cfg->header->max_stack)
1342 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack type, slot) pair. */
1344 pos = ins->type - 1 + slot * STACK_MAX;
1346 switch (ins->type) {
/* Reuse the cached variable if one exists for this slot/type. */
1353 if ((vnum = cfg->intvars [pos]))
1354 return cfg->varinfo [vnum];
1355 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1356 cfg->intvars [pos] = res->inst_c0;
1359 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, record the (image, token) pair for KEY in
 * cfg->token_info_hash so references can later be emitted as metadata
 * tokens instead of raw pointers.
 */
1365 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1368 * Don't use this if a generic_context is set, since that means AOT can't
1369 * look up the method using just the image+token.
1370 * table == 0 means this is a reference made from a wrapper.
1372 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1373 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1374 jump_info_token->image = image;
1375 jump_info_token->token = token;
1376 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1381 * This function is called to handle items that are left on the evaluation stack
1382 * at basic block boundaries. What happens is that we save the values to local variables
1383 * and we reload them later when first entering the target basic block (with the
1384 * handle_loaded_temps () function).
1385 * A single joint point will use the same variables (stored in the array bb->out_stack or
1386 * bb->in_stack, if the basic block is before or after the joint point).
1388 * This function needs to be called _before_ emitting the last instruction of
1389 * the bb (i.e. before emitting a branch).
1390 * If the stack merge fails at a join point, cfg->unverifiable is set.
1393 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1396 MonoBasicBlock *bb = cfg->cbb;
1397 MonoBasicBlock *outb;
1398 MonoInst *inst, **locals;
1403 if (cfg->verbose_level > 3)
1404 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: pick (or create) the set of variables used to carry the stack
 * across this bb's exit, reusing a successor's in_stack when one exists. */
1405 if (!bb->out_scount) {
1406 bb->out_scount = count;
1407 //printf ("bblock %d has out:", bb->block_num);
1409 for (i = 0; i < bb->out_count; ++i) {
1410 outb = bb->out_bb [i];
1411 /* exception handlers are linked, but they should not be considered for stack args */
1412 if (outb->flags & BB_EXCEPTION_HANDLER)
1414 //printf (" %d", outb->block_num);
1415 if (outb->in_stack) {
1417 bb->out_stack = outb->in_stack;
1423 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1424 for (i = 0; i < count; ++i) {
1426 * try to reuse temps already allocated for this purpouse, if they occupy the same
1427 * stack slot and if they are of the same type.
1428 * This won't cause conflicts since if 'local' is used to
1429 * store one of the values in the in_stack of a bblock, then
1430 * the same variable will be used for the same outgoing stack
1432 * This doesn't work when inlining methods, since the bblocks
1433 * in the inlined methods do not inherit their in_stack from
1434 * the bblock they are inlined to. See bug #58863 for an
1437 if (cfg->inlined_method)
1438 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1440 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate the chosen variables into each successor's in_stack;
 * a successor with a different in_scount makes the merge unverifiable. */
1445 for (i = 0; i < bb->out_count; ++i) {
1446 outb = bb->out_bb [i];
1447 /* exception handlers are linked, but they should not be considered for stack args */
1448 if (outb->flags & BB_EXCEPTION_HANDLER)
1450 if (outb->in_scount) {
1451 if (outb->in_scount != bb->out_scount) {
1452 cfg->unverifiable = TRUE;
1455 continue; /* check they are the same locals */
1457 outb->in_scount = count;
1458 outb->in_stack = bb->out_stack;
/* Phase 3: emit the stores of the current stack values into the variables,
 * and replace the stack entries with the variables themselves. */
1461 locals = bb->out_stack;
1463 for (i = 0; i < count; ++i) {
1464 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1465 inst->cil_code = sp [i]->cil_code;
1466 sp [i] = locals [i];
1467 if (cfg->verbose_level > 3)
1468 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1472 * It is possible that the out bblocks already have in_stack assigned, and
1473 * the in_stacks differ. In this case, we will store to all the different
1480 /* Find a bblock which has a different in_stack */
1482 while (bindex < bb->out_count) {
1483 outb = bb->out_bb [bindex];
1484 /* exception handlers are linked, but they should not be considered for stack args */
1485 if (outb->flags & BB_EXCEPTION_HANDLER) {
1489 if (outb->in_stack != locals) {
1490 for (i = 0; i < count; ++i) {
1491 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1492 inst->cil_code = sp [i]->cil_code;
1493 sp [i] = locals [i];
1494 if (cfg->verbose_level > 3)
1495 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1497 locals = outb->in_stack;
1506 /* Emit code which loads interface_offsets [klass->interface_id]
1507 * The array is stored in memory before vtable.
1510 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1512 if (cfg->compile_aot) {
1513 int ioffset_reg = alloc_preg (cfg);
1514 int iid_reg = alloc_preg (cfg);
/* AOT: the interface id is not known at compile time, so load an
 * "adjusted IID" constant via a patch — presumably the pre-scaled
 * negative offset into the array before the vtable (confirm against
 * the AOT compiler's handling of MONO_PATCH_INFO_ADJUSTED_IID). */
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1517 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1518 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the id is a compile-time constant, index backwards from the vtable */
1521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR which sets INTF_BIT_REG to a nonzero value iff bit
 * klass->interface_id is set in the interface bitmap located at
 * BASE_REG + OFFSET (a MonoClass or MonoVTable interface_bitmap field).
 * NOTE(review): #else/#endif lines of the COMPRESSED_INTERFACE_BITMAP
 * conditional are elided in this extract.
 */
1526 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1528 int ibitmap_reg = alloc_preg (cfg);
1529 #ifdef COMPRESSED_INTERFACE_BITMAP
/* compressed bitmap: delegate the bit test to the
 * mono_class_interface_match icall */
1531 MonoInst *res, *ins;
1532 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1533 MONO_ADD_INS (cfg->cbb, ins);
1535 if (cfg->compile_aot)
1536 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1538 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1539 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1540 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1542 int ibitmap_byte_reg = alloc_preg (cfg);
1544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1546 if (cfg->compile_aot) {
/* AOT: interface_id is unknown at compile time, so compute
 * byte index (iid >> 3) and bit mask (1 << (iid & 7)) in IR */
1547 int iid_reg = alloc_preg (cfg);
1548 int shifted_iid_reg = alloc_preg (cfg);
1549 int ibitmap_byte_address_reg = alloc_preg (cfg);
1550 int masked_iid_reg = alloc_preg (cfg);
1551 int iid_one_bit_reg = alloc_preg (cfg);
1552 int iid_bit_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1556 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1558 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants */
1562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1569 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1570 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoClass::interface_bitmap. */
1573 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1575 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1579 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1580 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoVTable::interface_bitmap. */
1583 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1585 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1589 * Emit code which checks whether the interface id of @klass is smaller
1590 * than the value given by max_iid_reg.
/* On failure (max_iid < iid): branch to false_target when one is given,
 * otherwise throw InvalidCastException.  NOTE(review): the if/else around
 * the branch vs. exception emission is elided in this extract — presumably
 * `if (false_target) ... else ...`; confirm against the full source. */
1593 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1594 MonoBasicBlock *false_target)
1596 if (cfg->compile_aot) {
1597 int iid_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1606 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1609 /* Same as above, but obtains max_iid from a vtable */
1611 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1612 MonoBasicBlock *false_target)
1614 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the LOADU2 */
1616 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1617 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1620 /* Same as above, but obtains max_iid from a klass */
1622 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1623 MonoBasicBlock *false_target)
1625 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the LOADU2 */
1627 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1628 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "is instance of" check: compare the supertype of the class in
 * KLASS_REG at klass's inheritance depth against klass (or against the
 * runtime class in KLASS_INS / an AOT class constant), branching to
 * TRUE_TARGET on match and to FALSE_TARGET when the candidate's idepth
 * is too small.  NOTE(review): several lines (idepth guard condition,
 * the if/else chain structure) are elided in this extract.
 */
1632 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1634 int idepth_reg = alloc_preg (cfg);
1635 int stypes_reg = alloc_preg (cfg);
1636 int stype = alloc_preg (cfg);
1638 mono_class_setup_supertypes (klass);
/* only classes deeper than the default supertable need an explicit
 * idepth range check before indexing supertypes [] */
1640 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1641 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1645 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* compare against: runtime class inst, AOT class constant, or immediate */
1648 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1649 } else if (cfg->compile_aot) {
1650 int const_reg = alloc_preg (cfg);
1651 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1652 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1656 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for mini_emit_isninst_cast_inst with no runtime
 * class instruction (compile-time klass only). */
1660 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1662 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against the vtable in VTABLE_REG:
 * range-check the interface id, test the interface bitmap bit, then
 * branch to TRUE_TARGET when set or throw InvalidCastException.
 * NOTE(review): the if/else around branch vs. exception is elided —
 * presumably selected by whether true_target is non-NULL.
 */
1666 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1668 int intf_reg = alloc_preg (cfg);
1670 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1671 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1676 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1680 * Variant of the above that takes a register to the class, not the vtable.
/* Same branch-or-throw structure as mini_emit_iface_cast; the if/else
 * selecting between them is elided in this extract. */
1683 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1685 int intf_bit_reg = alloc_preg (cfg);
1687 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1688 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1693 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class equality check: compare KLASS_REG against klass
 * (runtime KLASS_INST, AOT class constant, or immediate pointer) and
 * throw InvalidCastException when they differ.
 * NOTE(review): the leading `if (klass_inst)` condition line is elided.
 */
1697 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1700 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1701 } else if (cfg->compile_aot) {
1702 int const_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1704 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1708 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper for mini_emit_class_check_inst with no runtime
 * class instruction. */
1712 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1714 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against klass (AOT class constant or immediate) and
 * branch to TARGET using BRANCH_OP — the branching counterpart of
 * mini_emit_class_check.
 */
1718 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1720 if (cfg->compile_aot) {
1721 int const_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1723 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration — mini_emit_castclass_inst below recurses through it
 * for arrays of arrays. */
1731 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a full castclass check for the object in OBJ_REG whose class is in
 * KLASS_REG, throwing InvalidCastException on failure.  Arrays get a rank
 * check plus an element-class check (with special cases for enums and
 * System.Object elements); other classes get a supertypes-table check.
 * NOTE(review): the `if (klass->rank)` guard and several brace/else lines
 * are elided in this extract — the array vs. non-array split is inferred.
 */
1734 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1737 int rank_reg = alloc_preg (cfg);
1738 int eclass_reg = alloc_preg (cfg);
1740 g_assert (!klass_inst);
/* rank must match exactly */
1741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1743 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1744 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* element-class special cases: object, enum base class, enum, interface,
 * or a recursive castclass for anything else */
1746 if (klass->cast_class == mono_defaults.object_class) {
1747 int parent_reg = alloc_preg (cfg);
1748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1749 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1750 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1751 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1752 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1753 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1754 } else if (klass->cast_class == mono_defaults.enum_class) {
1755 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1756 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1757 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1759 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1760 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1763 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1764 /* Check that the object is a vector too */
1765 int bounds_reg = alloc_preg (cfg);
1766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1768 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array path: walk the supertypes table, as in
 * mini_emit_isninst_cast_inst, but throwing instead of branching */
1771 int idepth_reg = alloc_preg (cfg);
1772 int stypes_reg = alloc_preg (cfg);
1773 int stype = alloc_preg (cfg);
1775 mono_class_setup_supertypes (klass);
1777 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1778 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1780 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1783 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1784 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper for mini_emit_castclass_inst with no runtime
 * class instruction. */
1789 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1791 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit IR which fills SIZE bytes at DESTREG+OFFSET with VAL (asserted to
 * be 0).  Small aligned sizes use a single immediate store; otherwise a
 * sequence of register stores with decreasing granularity (8/4/2/1 bytes).
 * NOTE(review): the size/offset bookkeeping between the store groups
 * (offset += n; size -= n; loop conditions) is elided in this extract.
 */
1795 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1799 g_assert (val == 0);
1804 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1807 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1810 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1813 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1815 #if SIZEOF_REGISTER == 8
1817 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* larger/unaligned sizes: materialize val in a register once */
1823 val_reg = alloc_preg (cfg);
1825 if (SIZEOF_REGISTER == 8)
1826 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1828 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned prefix: byte stores until alignment is reached */
1831 /* This could be optimized further if neccesary */
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1840 #if !NO_UNALIGNED_ACCESS
1841 if (SIZEOF_REGISTER == 8) {
1843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* remaining tail: 4-, 2-, then 1-byte stores */
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit IR which copies SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET,
 * fully unrolled as load/store pairs with decreasing granularity
 * (8/4/2/1 bytes), respecting ALIGN.
 * NOTE(review): offset/size bookkeeping between the copy groups and the
 * loop conditions are elided in this extract.
 */
1873 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1880 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1881 g_assert (size < 10000);
/* unaligned prefix: byte copies until alignment is reached */
1884 /* This could be optimized further if neccesary */
1886 cur_reg = alloc_preg (cfg);
1887 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1895 #if !NO_UNALIGNED_ACCESS
1896 if (SIZEOF_REGISTER == 8) {
/* 8-byte copies on 64-bit targets that tolerate unaligned access */
1898 cur_reg = alloc_preg (cfg);
1899 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* remaining tail: 4-, 2-, then 1-byte copies */
1909 cur_reg = alloc_preg (cfg);
1910 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1911 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1917 cur_reg = alloc_preg (cfg);
1918 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1925 cur_reg = alloc_preg (cfg);
1926 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR which stores SREG1 into the TLS slot identified by TLS_KEY.
 * AOT cannot bake in the TLS offset, so it loads it from a patchable
 * constant and uses OP_TLS_SET_REG; JIT resolves the offset now and
 * uses OP_TLS_SET with an immediate offset.
 */
1935 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1939 if (cfg->compile_aot) {
1940 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1941 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1943 ins->sreg2 = c->dreg;
1944 MONO_ADD_INS (cfg->cbb, ins);
1946 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1948 ins->inst_offset = mini_get_tls_offset (tls_key);
1949 MONO_ADD_INS (cfg->cbb, ins);
1956 * Emit IR to push the current LMF onto the LMF stack.
/* NOTE(review): this function contains several platform #ifdef arms
 * (direct TLS LMF, jit_tls intrinsic, inlined pthread_getspecific,
 * plain icall) whose #if/#elif/#else/#endif lines are elided in this
 * extract — the visible branches are alternatives, not sequential code. */
1959 emit_push_lmf (MonoCompile *cfg)
1962 * Emit IR to push the LMF:
1963 * lmf_addr = <lmf_addr from tls>
1964 * lmf->lmf_addr = lmf_addr
1965 * lmf->prev_lmf = *lmf_addr
1968 int lmf_reg, prev_lmf_reg;
1969 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS — link the current LMF and
 * store the new one directly with a TLS write. */
1974 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1975 /* Load current lmf */
1976 lmf_ins = mono_get_lmf_intrinsic (cfg);
1978 MONO_ADD_INS (cfg->cbb, lmf_ins);
1979 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1980 lmf_reg = ins->dreg;
1981 /* Save previous_lmf */
1982 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1984 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1987 * Store lmf_addr in a variable, so it can be allocated to a global register.
1989 if (!cfg->lmf_addr_var)
1990 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr: via the jit_tls intrinsic when available... */
1993 ins = mono_get_jit_tls_intrinsic (cfg);
1995 int jit_tls_dreg = ins->dreg;
1997 MONO_ADD_INS (cfg->cbb, ins);
1998 lmf_reg = alloc_preg (cfg);
1999 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* ...or by calling mono_get_lmf_addr / the lmf_addr intrinsic */
2001 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2004 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2006 MONO_ADD_INS (cfg->cbb, lmf_ins);
2009 MonoInst *args [16], *jit_tls_ins, *ins;
2011 /* Inline mono_get_lmf_addr () */
2012 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2014 /* Load mono_jit_tls_id */
2015 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2016 /* call pthread_getspecific () */
2017 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2018 /* lmf_addr = &jit_tls->lmf */
2019 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2022 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2026 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Link the new LMF into the chain: lmf->previous_lmf = *lmf_addr;
 * *lmf_addr = lmf */
2028 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2029 lmf_reg = ins->dreg;
2031 prev_lmf_reg = alloc_preg (cfg);
2032 /* Save previous_lmf */
2033 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2034 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2036 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2043 * Emit IR to pop the current LMF from the LMF stack.
2046 emit_pop_lmf (MonoCompile *cfg)
2048 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2054 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2055 lmf_reg = ins->dreg;
/* Fast path (mirrors emit_push_lmf): restore previous_lmf straight
 * into the TLS LMF slot. */
2057 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2058 /* Load previous_lmf */
2059 prev_lmf_reg = alloc_preg (cfg);
2060 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2062 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2065 * Emit IR to pop the LMF:
2066 * *(lmf->lmf_addr) = lmf->prev_lmf
2068 /* This could be called before emit_push_lmf () */
2069 if (!cfg->lmf_addr_var)
2070 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2071 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2073 prev_lmf_reg = alloc_preg (cfg);
2074 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2075 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall (FUNC) taking the current method as
 * its single argument, gated on MONO_PROFILE_ENTER_LEAVE being enabled.
 */
2080 emit_instrumentation_call (MonoCompile *cfg, void *func)
2082 MonoInst *iargs [1];
2085 * Avoid instrumenting inlined methods since it can
2086 * distort profiling results.
2088 if (cfg->method != cfg->current_method)
2091 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2092 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2093 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call opcode variant for a given return type: plain CALL,
 * VOIDCALL, FCALL (float), LCALL (long), or VCALL (valuetype), each in
 * _REG (calli), _MEMBASE (virtual) or direct form.  Enums and generic
 * instances recurse via their underlying types (goto lines elided).
 * NOTE(review): several case labels and #if arms (e.g. around the
 * byref/pinvoke handling at the top) are elided in this extract.
 */
2098 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2101 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2104 type = mini_get_basic_type_from_generic (gsctx, type);
2105 type = mini_replace_type (type);
2106 switch (type->type) {
2107 case MONO_TYPE_VOID:
2108 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2111 case MONO_TYPE_BOOLEAN:
2114 case MONO_TYPE_CHAR:
2117 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2121 case MONO_TYPE_FNPTR:
2122 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2123 case MONO_TYPE_CLASS:
2124 case MONO_TYPE_STRING:
2125 case MONO_TYPE_OBJECT:
2126 case MONO_TYPE_SZARRAY:
2127 case MONO_TYPE_ARRAY:
2128 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2131 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2134 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2135 case MONO_TYPE_VALUETYPE:
/* enums dispatch on their base type (the goto back to the switch
 * is elided in this extract) */
2136 if (type->data.klass->enumtype) {
2137 type = mono_class_enum_basetype (type->data.klass);
2140 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2141 case MONO_TYPE_TYPEDBYREF:
2142 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2143 case MONO_TYPE_GENERICINST:
2144 type = &type->data.generic_class->container_class->byval_arg;
2147 case MONO_TYPE_MVAR:
/* gsharedvt type variables are returned by reference, hence VCALL */
2149 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2151 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2157 * target_type_is_incompatible:
2158 * @cfg: MonoCompile context
2160 * Check that the item @arg on the evaluation stack can be stored
2161 * in the target type (can be a local, or field, etc).
2162 * The cfg arg can be used to check if we need verification or just
2165 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): the `return 0;` / `return 1;` lines between cases are
 * elided in this extract — each case's accept/reject result is implied
 * by the visible condition only. */
2168 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2170 MonoType *simple_type;
2173 target = mini_replace_type (target);
2174 if (target->byref) {
2175 /* FIXME: check that the pointed to types match */
2176 if (arg->type == STACK_MP)
2177 return arg->klass != mono_class_from_mono_type (target);
2178 if (arg->type == STACK_PTR)
2183 simple_type = mono_type_get_underlying_type (target);
2184 switch (simple_type->type) {
2185 case MONO_TYPE_VOID:
2189 case MONO_TYPE_BOOLEAN:
2192 case MONO_TYPE_CHAR:
2195 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2199 /* STACK_MP is needed when setting pinned locals */
2200 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2205 case MONO_TYPE_FNPTR:
2207 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2208 * in native int. (#688008).
2210 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_CLASS:
2214 case MONO_TYPE_STRING:
2215 case MONO_TYPE_OBJECT:
2216 case MONO_TYPE_SZARRAY:
2217 case MONO_TYPE_ARRAY:
2218 if (arg->type != STACK_OBJ)
2220 /* FIXME: check type compatibility */
2224 if (arg->type != STACK_I8)
2229 if (arg->type != STACK_R8)
2232 case MONO_TYPE_VALUETYPE:
2233 if (arg->type != STACK_VTYPE)
2235 klass = mono_class_from_mono_type (simple_type);
2236 if (klass != arg->klass)
2239 case MONO_TYPE_TYPEDBYREF:
2240 if (arg->type != STACK_VTYPE)
2242 klass = mono_class_from_mono_type (simple_type);
2243 if (klass != arg->klass)
2246 case MONO_TYPE_GENERICINST:
/* generic insts split on valuetype-ness: vtype path requires exact
 * class match, reference path requires STACK_OBJ */
2247 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2248 if (arg->type != STACK_VTYPE)
2250 klass = mono_class_from_mono_type (simple_type);
2251 if (klass != arg->klass)
2255 if (arg->type != STACK_OBJ)
2257 /* FIXME: check type compatibility */
2261 case MONO_TYPE_MVAR:
/* type variables only reach here under generic sharing */
2262 g_assert (cfg->generic_sharing_context);
2263 if (mini_type_var_is_vt (cfg, simple_type)) {
2264 if (arg->type != STACK_VTYPE)
2267 if (arg->type != STACK_OBJ)
2272 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2278 * Prepare arguments for passing to a function call.
2279 * Return a non-zero value if the arguments can't be passed to the given
2281 * The type checks are not yet complete and some conversions may need
2282 * casts on 32 or 64 bit architectures.
2284 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): the per-case `return 1;` / `continue;` lines are elided
 * in this extract — each visible condition implies a rejection result. */
2287 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2289 MonoType *simple_type;
/* implicit 'this' argument (guard line elided — presumably sig->hasthis) */
2293 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2297 for (i = 0; i < sig->param_count; ++i) {
2298 if (sig->params [i]->byref) {
2299 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2303 simple_type = sig->params [i];
2304 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2306 switch (simple_type->type) {
2307 case MONO_TYPE_VOID:
2312 case MONO_TYPE_BOOLEAN:
2315 case MONO_TYPE_CHAR:
2318 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2324 case MONO_TYPE_FNPTR:
2325 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2328 case MONO_TYPE_CLASS:
2329 case MONO_TYPE_STRING:
2330 case MONO_TYPE_OBJECT:
2331 case MONO_TYPE_SZARRAY:
2332 case MONO_TYPE_ARRAY:
2333 if (args [i]->type != STACK_OBJ)
2338 if (args [i]->type != STACK_I8)
2343 if (args [i]->type != STACK_R8)
2346 case MONO_TYPE_VALUETYPE:
/* enums recheck via their base type (goto line elided) */
2347 if (simple_type->data.klass->enumtype) {
2348 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2351 if (args [i]->type != STACK_VTYPE)
2354 case MONO_TYPE_TYPEDBYREF:
2355 if (args [i]->type != STACK_VTYPE)
2358 case MONO_TYPE_GENERICINST:
2359 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2362 case MONO_TYPE_MVAR:
2364 if (args [i]->type != STACK_VTYPE)
2368 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map an OP_*CALL_MEMBASE (virtual) opcode to its direct OP_*CALL form.
 * NOTE(review): the return statements after each case label are elided
 * in this extract.
 */
2376 callvirt_to_call (int opcode)
2379 case OP_CALL_MEMBASE:
2381 case OP_VOIDCALL_MEMBASE:
2383 case OP_FCALL_MEMBASE:
2385 case OP_VCALL_MEMBASE:
2387 case OP_LCALL_MEMBASE:
2390 g_assert_not_reached ();
2396 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT argument (either the interface MonoMethod* or the
 * already-computed IMT_ARG value) into a register and attach it to CALL.
 * LLVM keeps it as call->imt_arg_reg; the JIT backend pins it to
 * MONO_ARCH_IMT_REG or falls back to mono_arch_emit_imt_argument ().
 * NOTE(review): #else/#endif lines of the COMPILE_LLVM / MONO_ARCH_IMT_REG
 * conditionals are elided in this extract.
 */
2398 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2402 if (COMPILE_LLVM (cfg)) {
2403 method_reg = alloc_preg (cfg);
2406 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2407 } else if (cfg->compile_aot) {
2408 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2411 MONO_INST_NEW (cfg, ins, OP_PCONST);
2412 ins->inst_p0 = method;
2413 ins->dreg = method_reg;
2414 MONO_ADD_INS (cfg->cbb, ins);
2418 call->imt_arg_reg = method_reg;
2420 #ifdef MONO_ARCH_IMT_REG
2421 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2423 /* Need this to keep the IMT arg alive */
2424 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2429 #ifdef MONO_ARCH_IMT_REG
2430 method_reg = alloc_preg (cfg);
2433 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2434 } else if (cfg->compile_aot) {
2435 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2438 MONO_INST_NEW (cfg, ins, OP_PCONST);
2439 ins->inst_p0 = method;
2440 ins->dreg = method_reg;
2441 MONO_ADD_INS (cfg->cbb, ins);
2444 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2446 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo patch record from mempool MP.
 * NOTE(review): the assignments of ji->ip and ji->type (and the return)
 * are elided in this extract; only data.target is visible.
 */
2450 static MonoJumpInfo *
2451 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2453 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2457 ji->data.target = target;
/*
 * mini_class_check_context_used:
 * Return klass's generic-context-used flags when generic sharing is
 * active (presumably 0 otherwise — the else return is elided here).
 */
2463 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2465 if (cfg->generic_sharing_context)
2466 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 * Return method's generic-context-used flags when generic sharing is
 * active (presumably 0 otherwise — the else return is elided here).
 */
2472 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2474 if (cfg->generic_sharing_context)
2475 return mono_method_check_context_used (method);
2481 * check_method_sharing:
2483 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* Outputs via OUT_PASS_VTABLE / OUT_PASS_MRGCTX (either may be NULL).
 * NOTE(review): some assignment lines (e.g. sharable = TRUE,
 * pass_vtable/pass_mrgctx = TRUE) are elided in this extract — the
 * visible conditions imply them. */
2486 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2488 gboolean pass_vtable = FALSE;
2489 gboolean pass_mrgctx = FALSE;
/* static or valuetype methods on generic classes may need the vtable */
2491 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2492 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2493 gboolean sharable = FALSE;
2495 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2498 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2499 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2500 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2502 sharable = sharing_enabled && context_sharable;
2506 * Pass vtable iff target method might
2507 * be shared, which means that sharing
2508 * is enabled for its class and its
2509 * context is sharable (and it's not a
2512 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* generic methods (method_inst set) need an mrgctx instead */
2516 if (mini_method_get_context (cmethod) &&
2517 mini_method_get_context (cmethod)->method_inst) {
2518 g_assert (!pass_vtable);
2520 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2523 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2524 MonoGenericContext *context = mini_method_get_context (cmethod);
2525 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2527 if (sharing_enabled && context_sharable)
2529 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2534 if (out_pass_vtable)
2535 *out_pass_vtable = pass_vtable;
2536 if (out_pass_mrgctx)
2537 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 * Build a MonoCallInst for SIG/ARGS. CALLI/VIRTUAL/TAIL/RGCTX/UNBOX_TRAMPOLINE
 * select the call flavor. Handles valuetype returns (vret_var / OP_OUTARG_VTRETADDR)
 * and, under soft-float, converts r4 arguments before the call sequence.
 * NOTE(review): lines are elided in this view; statement order is partial.
 */
inline static MonoCallInst *
2541 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2542 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2546 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls: emit the leave-profiler event and use the dedicated opcode. */
2551 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2553 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2555 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2558 call->signature = sig;
2559 call->rgctx_reg = rgctx;
2560 sig_ret = mini_replace_type (sig->ret);
2562 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2565 if (mini_type_is_vtype (cfg, sig_ret)) {
2566 call->vret_var = cfg->vret_addr;
2567 //g_assert_not_reached ();
2569 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2570 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2573 temp->backend.is_pinvoke = sig->pinvoke;
2576 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2577 * address of return value to increase optimization opportunities.
2578 * Before vtype decomposition, the dreg of the call ins itself represents the
2579 * fact the call modifies the return value. After decomposition, the call will
2580 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2581 * will be transformed into an LDADDR.
2583 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2584 loada->dreg = alloc_preg (cfg);
2585 loada->inst_p0 = temp;
2586 /* We reference the call too since call->dreg could change during optimization */
2587 loada->inst_p1 = call;
2588 MONO_ADD_INS (cfg->cbb, loada);
2590 call->inst.dreg = temp->dreg;
2592 call->vret_var = loada;
2593 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2594 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2596 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2597 if (COMPILE_SOFT_FLOAT (cfg)) {
2599 * If the call has a float argument, we would need to do an r8->r4 conversion using
2600 * an icall, but that cannot be done during the call sequence since it would clobber
2601 * the call registers + the stack. So we do it before emitting the call.
2603 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2605 MonoInst *in = call->args [i];
/* Index 0 is 'this' (pointer-sized) when sig->hasthis. */
2607 if (i >= sig->hasthis)
2608 t = sig->params [i - sig->hasthis];
2610 t = &mono_defaults.int_class->byval_arg;
2611 t = mono_type_get_underlying_type (t);
2613 if (!t->byref && t->type == MONO_TYPE_R4) {
2614 MonoInst *iargs [1];
2618 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2620 /* The result will be in an int vreg */
2621 call->args [i] = conv;
2627 call->need_unbox_trampoline = unbox_trampoline;
2630 if (COMPILE_LLVM (cfg))
2631 mono_llvm_emit_call (cfg, call);
2633 mono_arch_emit_call (cfg, call);
2635 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-arg area and remember the method makes calls. */
2638 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2639 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 * Attach the rgctx argument in RGCTX_REG to CALL, either via the dedicated
 * MONO_ARCH_RGCTX_REG hard register or by recording the vreg on the call.
 */
2645 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2647 #ifdef MONO_ARCH_RGCTX_REG
2648 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2649 cfg->uses_rgctx_reg = TRUE;
2650 call->rgctx_reg = TRUE;
2652 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through ADDR with optional IMT and rgctx arguments.
 * For pinvoke wrappers with check_pinvoke_callconv enabled it saves the stack
 * pointer before the call and compares it afterwards, throwing
 * ExecutionEngineException on a callee-induced stack imbalance.
 */
inline static MonoInst*
2660 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2665 gboolean check_sp = FALSE;
2667 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2668 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2670 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx arg into its own vreg so it survives until set_rgctx_arg (). */
2675 rgctx_reg = mono_alloc_preg (cfg);
2676 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2680 if (!cfg->stack_inbalance_var)
2681 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call. */
2683 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2684 ins->dreg = cfg->stack_inbalance_var->dreg;
2685 MONO_ADD_INS (cfg->cbb, ins);
2688 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2690 call->inst.sreg1 = addr->dreg;
2693 emit_imt_argument (cfg, call, NULL, imt_arg);
2695 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Re-read SP after the call and compare against the snapshot. */
2700 sp_reg = mono_alloc_preg (cfg);
2702 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2704 MONO_ADD_INS (cfg->cbb, ins);
2706 /* Restore the stack so we don't crash when throwing the exception */
2707 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2708 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2709 MONO_ADD_INS (cfg->cbb, ins);
2711 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2712 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2716 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2718 return (MonoInst*)call;
2722 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2725 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2727 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 * Emit a (possibly virtual/tail) call to METHOD. Handles remoting proxies,
 * string ctors, the delegate Invoke fast path, devirtualization of final
 * methods, IMT-based interface dispatch and plain vtable dispatch.
 * NOTE(review): elided view — some branches and the basic-block structure
 * are only partially visible here.
 */
2730 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2731 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2733 #ifndef DISABLE_REMOTING
2734 gboolean might_be_remote = FALSE;
2736 gboolean virtual = this != NULL;
2737 gboolean enable_for_aot = TRUE;
2741 gboolean need_unbox_trampoline;
2744 sig = mono_method_signature (method);
2747 rgctx_reg = mono_alloc_preg (cfg);
2748 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2751 if (method->string_ctor) {
2752 /* Create the real signature */
2753 /* FIXME: Cache these */
2754 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2755 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2760 context_used = mini_method_check_context_used (cfg, method);
2762 #ifndef DISABLE_REMOTING
/* A call on a MarshalByRef (or object) 'this' may hit a transparent proxy. */
2763 might_be_remote = this && sig->hasthis &&
2764 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2765 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2767 if (might_be_remote && context_used) {
2770 g_assert (cfg->generic_sharing_context);
/* Under sharing, fetch the remoting-check wrapper address from the rgctx. */
2772 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2774 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2778 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2780 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2782 #ifndef DISABLE_REMOTING
2783 if (might_be_remote)
2784 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2787 call->method = method;
2788 call->inst.flags |= MONO_INST_HAS_METHOD;
2789 call->inst.inst_left = this;
2790 call->tail_call = tail;
2793 int vtable_reg, slot_reg, this_reg;
2796 this_reg = this->dreg;
/* Fast path: multicast-delegate Invoke calls go through delegate->invoke_impl. */
2798 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2799 MonoInst *dummy_use;
2801 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2803 /* Make a call to delegate->invoke_impl */
2804 call->inst.inst_basereg = this_reg;
2805 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2806 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2808 /* We must emit a dummy use here because the delegate trampoline will
2809 replace the 'this' argument with the delegate target making this activation
2810 no longer a root for the delegate.
2811 This is an issue for delegates that target collectible code such as dynamic
2812 methods of GC'able assemblies.
2814 For a test case look into #667921.
2816 FIXME: a dummy use is not the best way to do it as the local register allocator
2817 will put it on a caller save register and spil it around the call.
2818 Ideally, we would either put it on a callee save register or only do the store part.
2820 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2822 return (MonoInst*)call;
2825 if ((!cfg->compile_aot || enable_for_aot) &&
2826 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2827 (MONO_METHOD_IS_FINAL (method) &&
2828 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2829 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2831 * the method is not virtual, we just need to ensure this is not null
2832 * and then we can call the method directly.
2834 #ifndef DISABLE_REMOTING
2835 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2837 * The check above ensures method is not gshared, this is needed since
2838 * gshared methods can't have wrappers.
2840 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2844 if (!method->string_ctor)
2845 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2847 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2848 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2850 * the method is virtual, but we can statically dispatch since either
2851 * it's class or the method itself are sealed.
2852 * But first we need to ensure it's not a null reference.
2854 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2856 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (with an implicit null check). */
2858 vtable_reg = alloc_preg (cfg);
2859 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2860 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: dispatch through the IMT slot below the vtable. */
2863 guint32 imt_slot = mono_method_get_imt_slot (method);
2864 emit_imt_argument (cfg, call, call->method, imt_arg);
2865 slot_reg = vtable_reg;
2866 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2868 if (slot_reg == -1) {
2869 slot_reg = alloc_preg (cfg);
2870 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2871 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class call: index directly into the vtable's method table. */
2874 slot_reg = vtable_reg;
2875 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2876 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2878 g_assert (mono_method_signature (method)->generic_param_count);
2879 emit_imt_argument (cfg, call, call->method, imt_arg);
2883 call->inst.sreg1 = slot_reg;
2884 call->inst.inst_offset = offset;
2885 call->virtual = TRUE;
2889 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2892 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2894 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 * Convenience wrapper: non-tail call to METHOD with its own signature and
 * no IMT/rgctx arguments.
 */
2898 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2900 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function FUNC with signature SIG.
 * No virtual/tail/rgctx handling.
 */
2904 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2911 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2914 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2916 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the registered JIT icall identified by its address FUNC,
 * going through the icall's wrapper. FUNC must have been registered.
 */
2920 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2922 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2926 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2930 * mono_emit_abs_call:
2932 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the call address; it is registered in
 * cfg->abs_patches so the PATCH_INFO_ABS resolver can find it at patch time.
 */
inline static MonoInst*
2935 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2936 MonoMethodSignature *sig, MonoInst **args)
2938 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2942 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2945 if (cfg->abs_patches == NULL)
2946 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2947 g_hash_table_insert (cfg->abs_patches, ji, ji);
2948 ins = mono_emit_native_call (cfg, ji, sig, args);
2949 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 * Widen a sub-register-sized integer return value of a call. Needed for
 * pinvoke (and LLVM) returns, since native code may leave the upper bits
 * of small integers uninitialized.
 */
2954 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2956 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2957 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2961 * Native code might return non register sized integers
2962 * without initializing the upper bits.
/* Map the return type's load opcode to the matching sign/zero extension. */
2964 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2965 case OP_LOADI1_MEMBASE:
2966 widen_op = OP_ICONV_TO_I1;
2968 case OP_LOADU1_MEMBASE:
2969 widen_op = OP_ICONV_TO_U1;
2971 case OP_LOADI2_MEMBASE:
2972 widen_op = OP_ICONV_TO_I2;
2974 case OP_LOADU2_MEMBASE:
2975 widen_op = OP_ICONV_TO_U2;
2981 if (widen_op != -1) {
2982 int dreg = alloc_preg (cfg);
2985 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2986 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return (and lazily cache) the managed String.memcpy (3-arg) helper.
 * Aborts with g_error () if corlib does not provide it.
 */
2996 get_memcpy_method (void)
2998 static MonoMethod *memcpy_method = NULL;
2999 if (!memcpy_method) {
3000 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3002 g_error ("Old corlib found. Install a new one");
3004 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Recursively set one bit in WB_BITMAP (indexed in pointer-sized slots,
 * biased by OFFSET) for every reference field of KLASS, descending into
 * embedded valuetypes that contain references.
 */
3008 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3010 MonoClassField *field;
3011 gpointer iter = NULL;
3013 while ((field = mono_class_get_fields (klass, &iter))) {
3016 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
3018 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3019 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3020 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3021 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3023 MonoClass *field_class = mono_class_from_mono_type (field->type);
3024 if (field_class->has_references)
3025 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for the store of VALUE through PTR. Prefers, in
 * order: the arch-specific OP_CARD_TABLE_WBARRIER opcode, inline card-table
 * marking, and finally a call to the generic managed write-barrier method.
 * No-op when cfg->gen_write_barriers is off.
 */
3031 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3033 int card_table_shift_bits;
3034 gpointer card_table_mask;
3036 MonoInst *dummy_use;
3037 int nursery_shift_bits;
3038 size_t nursery_size;
3039 gboolean has_card_table_wb = FALSE;
3041 if (!cfg->gen_write_barriers)
3044 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3046 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3048 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3049 has_card_table_wb = TRUE;
3052 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3055 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3056 wbarrier->sreg1 = ptr->dreg;
3057 wbarrier->sreg2 = value->dreg;
3058 MONO_ADD_INS (cfg->cbb, wbarrier);
3059 } else if (card_table) {
/* Inline card marking: card_table [ptr >> shift] = 1. */
3060 int offset_reg = alloc_preg (cfg);
3061 int card_reg = alloc_preg (cfg);
3064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3065 if (card_table_mask)
3066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3068 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3069 * IMM's larger than 32bits.
3071 if (cfg->compile_aot) {
3072 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3074 MONO_INST_NEW (cfg, ins, OP_PCONST);
3075 ins->inst_p0 = card_table;
3076 ins->dreg = card_reg;
3077 MONO_ADD_INS (cfg->cbb, ins);
3080 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3081 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write barrier. */
3083 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3084 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier. */
3087 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Try to emit an unrolled, write-barrier-aware copy of SIZE bytes from
 * iargs[1] to iargs[0] for valuetype KLASS. Falls back to the
 * mono_gc_wbarrier_value_copy_bitmap icall for larger copies.
 * NOTE(review): elided view — the early bail-out returns and the
 * gboolean result value are not visible here.
 */
3091 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3093 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3094 unsigned need_wb = 0;
3099 /*types with references can't have alignment smaller than sizeof(void*) */
3100 if (align < SIZEOF_VOID_P)
3103 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3104 if (size > 32 * SIZEOF_VOID_P)
3107 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3109 /* We don't unroll more than 5 stores to avoid code bloat. */
3110 if (size > 5 * SIZEOF_VOID_P) {
3111 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3112 size += (SIZEOF_VOID_P - 1);
3113 size &= ~(SIZEOF_VOID_P - 1);
3115 EMIT_NEW_ICONST (cfg, iargs [2], size);
3116 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3117 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3121 destreg = iargs [0]->dreg;
3122 srcreg = iargs [1]->dreg;
3125 dest_ptr_reg = alloc_preg (cfg);
3126 tmp_reg = alloc_preg (cfg);
3129 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy pointer-sized slots, emitting a barrier for slots flagged in need_wb. */
3131 while (size >= SIZEOF_VOID_P) {
3132 MonoInst *load_inst;
3133 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3134 load_inst->dreg = tmp_reg;
3135 load_inst->inst_basereg = srcreg;
3136 load_inst->inst_offset = offset;
3137 MONO_ADD_INS (cfg->cbb, load_inst);
3139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3142 emit_write_barrier (cfg, iargs [0], load_inst);
3144 offset += SIZEOF_VOID_P;
3145 size -= SIZEOF_VOID_P;
3148 /*tmp += sizeof (void*)*/
3149 if (size >= SIZEOF_VOID_P) {
3150 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3151 MONO_ADD_INS (cfg->cbb, iargs [0]);
3155 /* Those cannot be references since size < sizeof (void*) */
3157 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3158 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3164 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3165 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3171 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3172 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 */
3181 * Emit code to copy a valuetype of type @klass whose address is stored in
3182 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * NATIVE selects native vs. managed layout size. Picks between inline memcpy,
 * wb-aware unrolled copy, mono_value_copy, gsharedvt value copy, or a calli
 * to the managed memcpy helper, depending on size/barriers/gsharedvt.
 */
3185 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3187 MonoInst *iargs [4];
3188 int context_used, n;
3190 MonoMethod *memcpy_method;
3191 MonoInst *size_ins = NULL;
3192 MonoInst *memcpy_ins = NULL;
3196 * This check breaks with spilled vars... need to handle it during verification anyway.
3197 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: the size and memcpy routine must come from the runtime context. */
3200 if (mini_is_gsharedvt_klass (cfg, klass)) {
3202 context_used = mini_class_check_context_used (cfg, klass);
3203 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3204 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3208 n = mono_class_native_size (klass, &align);
3210 n = mono_class_value_size (klass, &align);
3212 /* if native is true there should be no references in the struct */
3213 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3214 /* Avoid barriers when storing to the stack */
3215 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3216 (dest->opcode == OP_LDADDR))) {
3222 context_used = mini_class_check_context_used (cfg, klass);
3224 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3225 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3227 } else if (context_used) {
3228 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3230 if (cfg->compile_aot) {
3231 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3233 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3234 mono_class_compute_gc_descriptor (klass);
3239 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3241 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies with no barriers: emit inline loads/stores. */
3246 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3247 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3248 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3253 iargs [2] = size_ins;
3255 EMIT_NEW_ICONST (cfg, iargs [2], n);
3257 memcpy_method = get_memcpy_method ();
3259 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3261 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return (and lazily cache) the managed String.memset (3-arg) helper.
 * Aborts with g_error () if corlib does not provide it.
 */
3266 get_memset_method (void)
3268 static MonoMethod *memset_method = NULL;
3269 if (!memset_method) {
3270 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3272 g_error ("Old corlib found. Install a new one");
3274 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code to zero-initialize a valuetype of KLASS at DEST. Uses a
 * gsharedvt bzero calli when the size is only known at runtime, an inline
 * memset for small sizes, or the managed memset helper otherwise.
 */
3278 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3280 MonoInst *iargs [3];
3281 int n, context_used;
3283 MonoMethod *memset_method;
3284 MonoInst *size_ins = NULL;
3285 MonoInst *bzero_ins = NULL;
3286 static MonoMethod *bzero_method;
3288 /* FIXME: Optimize this for the case when dest is an LDADDR */
3290 mono_class_init (klass);
3291 if (mini_is_gsharedvt_klass (cfg, klass)) {
3292 context_used = mini_class_check_context_used (cfg, klass);
3293 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3294 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3296 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3297 g_assert (bzero_method);
3299 iargs [1] = size_ins;
3300 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3304 n = mono_class_value_size (klass, &align);
/* Small sizes: inline stores instead of a call. */
3306 if (n <= sizeof (gpointer) * 5) {
3307 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3310 memset_method = get_memset_method ();
3312 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3313 EMIT_NEW_ICONST (cfg, iargs [2], n);
3314 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR to load the runtime generic context for METHOD. Depending on
 * CONTEXT_USED and the method's flags this is the mrgctx variable, the
 * vtable variable (possibly read through an mrgctx), or the vtable read
 * from 'this'. Requires a generic-sharing compilation.
 */
3319 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3321 MonoInst *this = NULL;
3323 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types can read the vtable off 'this'. */
3325 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3326 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3327 !method->klass->valuetype)
3328 EMIT_NEW_ARGLOAD (cfg, this, 0);
3330 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3331 MonoInst *mrgctx_loc, *mrgctx_var;
3334 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3336 mrgctx_loc = mono_get_vtable_var (cfg);
3337 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3340 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3341 MonoInst *vtable_loc, *vtable_var;
3345 vtable_loc = mono_get_vtable_var (cfg);
3346 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3348 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3349 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an mrgctx: load its class_vtable field. */
3352 vtable_reg = alloc_preg (cfg);
3353 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3354 vtable_var->type = STACK_PTR;
3362 vtable_reg = alloc_preg (cfg);
3363 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate an rgctx-fetch patch descriptor from MP: an embedded MonoJumpInfo
 * of PATCH_TYPE/PATCH_DATA plus the requested INFO_TYPE. Mempool-owned.
 */
static MonoJumpInfoRgctxEntry *
3369 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3371 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3372 res->method = method;
3373 res->in_mrgctx = in_mrgctx;
3374 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3375 res->data->type = patch_type;
3376 res->data->data.target = patch_data;
3377 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY
 * using the context in RGCTX.
 */
static inline MonoInst*
3383 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3385 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to load the RGCTX_TYPE info of KLASS from the runtime generic
 * context of the current method.
 */
3389 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3390 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3392 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3393 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3395 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 * Emit IR to load the RGCTX_TYPE info of signature SIG from the runtime
 * generic context of the current method.
 */
3399 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3400 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3402 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3403 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3405 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 * Emit IR to load the RGCTX_TYPE info for a gsharedvt call (SIG + CMETHOD
 * bundled in a MonoJumpInfoGSharedVtCall) from the runtime generic context.
 */
3409 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3410 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3412 MonoJumpInfoGSharedVtCall *call_info;
3413 MonoJumpInfoRgctxEntry *entry;
3416 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3417 call_info->sig = sig;
3418 call_info->method = cmethod;
3420 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3421 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3423 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 * Emit IR to load the gsharedvt per-method INFO structure for CMETHOD
 * from the runtime generic context.
 */
3428 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3429 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3431 MonoJumpInfoRgctxEntry *entry;
3434 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3435 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3437 return emit_rgctx_fetch (cfg, rgctx, entry);
3441 * emit_get_rgctx_method:
3443 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3444 * normal constants, else emit a load from the rgctx.
3447 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3448 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3450 if (!context_used) {
/* Non-shared case: the method is known at compile time; emit a constant. */
3453 switch (rgctx_type) {
3454 case MONO_RGCTX_INFO_METHOD:
3455 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3457 case MONO_RGCTX_INFO_METHOD_RGCTX:
3458 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3461 g_assert_not_reached ();
/* Shared case: fetch lazily through the rgctx trampoline. */
3464 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3465 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3467 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to load the RGCTX_TYPE info of FIELD from the runtime generic
 * context of the current method.
 */
3472 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3473 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3475 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3476 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3478 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 * Return the slot index in cfg->gsharedvt_info for the (DATA, RGCTX_TYPE)
 * entry, reusing an existing entry when possible (except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET, which is never deduplicated) and growing
 * the mempool-backed entries array on demand.
 */
3482 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3484 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3485 MonoRuntimeGenericContextInfoTemplate *template;
3490 for (i = 0; i < info->num_entries; ++i) {
3491 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3493 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow: double capacity (start at 16) and copy the old entries over. */
3497 if (info->num_entries == info->count_entries) {
3498 MonoRuntimeGenericContextInfoTemplate *new_entries;
3499 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3501 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3503 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3504 info->entries = new_entries;
3505 info->count_entries = new_count_entries;
3508 idx = info->num_entries;
3509 template = &info->entries [idx];
3510 template->info_type = rgctx_type;
3511 template->data = data;
3513 info->num_entries ++;
3519 * emit_get_gsharedvt_info:
3521 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3524 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3529 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3530 /* Load info->entries [idx] */
3531 dreg = alloc_preg (cfg);
3532 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 * Convenience wrapper: gsharedvt info lookup keyed by KLASS's byval type.
 */
3538 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3540 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic-class-init trampoline for KLASS, passing the
 * vtable either via the rgctx (shared code) or as a vtable constant.
 */
3544 * On return the caller must check @klass for load errors.
3547 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3549 MonoInst *vtable_arg;
3553 context_used = mini_class_check_context_used (cfg, klass);
3556 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3557 klass, MONO_RGCTX_INFO_VTABLE);
3559 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3563 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
3566 if (COMPILE_LLVM (cfg))
3567 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3569 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3570 #ifdef MONO_ARCH_VTABLE_REG
3571 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3572 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 * Emit a sequence point at IL offset IP when sequence points are enabled
 * and METHOD is the method being compiled (not an inlined callee).
 * NONEMPTY_STACK marks seq points emitted with values on the eval stack.
 */
3579 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3583 if (cfg->gen_seq_points && cfg->method == method) {
3584 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3586 ins->flags |= MONO_INST_NONEMPTY_STACK;
3587 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (read from the
 * object's vtable) and the target @klass into the thread's MonoJitTlsData
 * before a cast, so a failing cast can report both types. If @null_check is
 * set, a null @obj_reg skips the recording. If @out_bblock is non-NULL it
 * receives the basic block current after the emitted code.
 */
3592 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3594 	if (mini_get_debug_options ()->better_cast_details) {
3595 		int vtable_reg = alloc_preg (cfg);
3596 		int klass_reg = alloc_preg (cfg);
3597 		MonoBasicBlock *is_null_bb = NULL;
3599 		int to_klass_reg, context_used;
/* Skip the detail-recording code for null objects. */
3602 			NEW_BBLOCK (cfg, is_null_bb);
3604 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3605 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Need TLS access to reach the per-thread MonoJitTlsData. */
3608 		tls_get = mono_get_jit_tls_intrinsic (cfg);
3610 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3614 		MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass */
3615 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3616 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3618 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3620 		context_used = mini_class_check_context_used (cfg, klass);
3622 			MonoInst *class_ins;
/* In shared code the target class must come from the rgctx. */
3624 			class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3625 			to_klass_reg = class_ins->dreg;
3627 			to_klass_reg = alloc_preg (cfg);
3628 			MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3630 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3633 			MONO_START_BB (cfg, is_null_bb);
3635 				*out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the cast-detail record written by save_cast_details () once the
 * cast has succeeded, so stale data is not reported on a later failure.
 */
3641 reset_cast_details (MonoCompile *cfg)
3643 	/* Reset the variables holding the cast details */
3644 	if (mini_get_debug_options ()->better_cast_details) {
3645 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3647 		MONO_ADD_INS (cfg->cbb, tls_get);
3648 		/* It is enough to reset the from field */
3649 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3654  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that @obj is an instance of exactly @array_class,
 * throwing ArrayTypeMismatchException otherwise (used for array covariance
 * checks on stores). The comparison strategy depends on the compilation mode:
 * class compare under MONO_OPT_SHARED, vtable compare otherwise, with
 * AOT-safe constant loads where direct pointers cannot be embedded.
 */
3657 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3659 	int vtable_reg = alloc_preg (cfg);
3662 	context_used = mini_class_check_context_used (cfg, array_class);
3664 	save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on obj. */
3666 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3668 	if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code: compare classes rather than (domain-specific) vtables. */
3669 		int class_reg = alloc_preg (cfg);
3670 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3671 		if (cfg->compile_aot) {
/* AOT cannot embed raw class pointers; load through a patchable constant. */
3672 			int klass_reg = alloc_preg (cfg);
3673 			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3674 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3676 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3678 	} else if (context_used) {
/* Generic-shared code: fetch the expected vtable from the rgctx. */
3679 		MonoInst *vtable_ins;
3681 		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3682 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3684 		if (cfg->compile_aot) {
3688 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3690 			vt_reg = alloc_preg (cfg);
3691 			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3692 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3695 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3697 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3701 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3703 	reset_cast_details (cfg);
3707  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3708  * generic code is generated.
/* Emits a call to Nullable<T>.Unbox (obj); returns the call instruction. */
3711 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3713 	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3716 		MonoInst *rgctx, *addr;
3718 		/* FIXME: What if the class is shared? We might not
3719 		   have to get the address of the method from the
/* Shared code: call Unbox indirectly through an rgctx-fetched code address. */
3721 		addr = emit_get_rgctx_method (cfg, context_used, method,
3722 									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3724 		rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3726 		return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3728 		gboolean pass_vtable, pass_mrgctx;
3729 		MonoInst *rgctx_arg = NULL;
/* Non-shared code: direct call, passing the vtable if the target needs it. */
3731 		check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3732 		g_assert (!pass_mrgctx);
3735 			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3738 			EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3741 		return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the unbox of the object on top of the stack (sp [0]) to value type
 * @klass: check the runtime type (rank must be 0 and element classes must
 * match, else InvalidCastException) and return an instruction producing the
 * address of the value stored just after the MonoObject header.
 */
3746 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3750 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3751 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3752 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3753 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
3755 	obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3756 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3757 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3759 	/* FIXME: generics */
3760 	g_assert (klass->rank == 0);
/* The boxed object must not be an array. */
3763 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3764 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3766 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3767 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3770 		MonoInst *element_class;
3772 		/* This assertion is from the unboxcast insn */
3773 		g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx. */
3775 		element_class = emit_get_rgctx_klass (cfg, context_used,
3776 				klass->element_class, MONO_RGCTX_INFO_KLASS);
3778 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3779 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3781 		save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3782 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3783 		reset_cast_details (cfg);
/* The unboxed value lives right after the object header. */
3786 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3787 	MONO_ADD_INS (cfg->cbb, add);
3788 	add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox @obj when @klass is a gsharedvt type whose concrete kind (valuetype,
 * reference type, or Nullable) is only known at runtime. Branches on the
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE info slot (1 == reference, 2 == nullable;
 * anything else is treated as a plain vtype) and produces the address of the
 * unboxed value in addr_reg. *out_cbb receives the final basic block.
 */
3795 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3797 	MonoInst *addr, *klass_inst, *is_ref, *args[16];
3798 	MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3802 	klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Runtime type check: castclass-unbox throws on mismatch and returns obj. */
3808 	args [1] = klass_inst;
3811 	obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3813 	NEW_BBLOCK (cfg, is_ref_bb);
3814 	NEW_BBLOCK (cfg, is_nullable_bb);
3815 	NEW_BBLOCK (cfg, end_bb);
3816 	is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3817 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3818 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3820 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3821 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3823 	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3824 	addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough: plain vtype — value sits right after the MonoObject header. */
3828 	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3829 	MONO_ADD_INS (cfg->cbb, addr);
3831 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference-type case: keep the reference itself, via a temporary slot. */
3834 	MONO_START_BB (cfg, is_ref_bb);
3836 	/* Save the ref to a temporary */
3837 	dreg = alloc_ireg (cfg);
3838 	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3839 	addr->dreg = addr_reg;
3840 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3841 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call Nullable<T>.Unbox through an rgctx-provided address. */
3844 	MONO_START_BB (cfg, is_nullable_bb);
3847 		MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3848 		MonoInst *unbox_call;
3849 		MonoMethodSignature *unbox_sig;
3852 		var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* Build the (object) -> T unbox signature by hand; it cannot be constructed normally for a gsharedvt T. */
3854 		unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3855 		unbox_sig->ret = &klass->byval_arg;
3856 		unbox_sig->param_count = 1;
3857 		unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3858 		unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3860 		EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3861 		addr->dreg = addr_reg;
3864 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3867 	MONO_START_BB (cfg, end_bb);
/* Load the value through the computed address. */
3870 	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3872 	*out_cbb = cfg->cbb;
3878  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit the allocation of a new object of type @klass (@for_box marks a box
 * allocation, which may pick a different managed allocator). Chooses between
 * a GC managed allocator, a specialized icall, mono_object_new (shared/appdomain
 * code) and an mscorlib-only AOT helper. Returns the call instruction.
 */
3881 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3883 	MonoInst *iargs [2];
/* Generic-shared path: class/vtable comes from the rgctx. */
3889 		MonoInst *iargs [2];
3891 		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* MONO_OPT_SHARED code calls mono_object_new (domain, klass); otherwise the vtable is enough. */
3893 		if (cfg->opt & MONO_OPT_SHARED)
3894 			rgctx_info = MONO_RGCTX_INFO_KLASS;
3896 			rgctx_info = MONO_RGCTX_INFO_VTABLE;
3897 		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3899 		if (cfg->opt & MONO_OPT_SHARED) {
3900 			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3902 			alloc_ftn = mono_object_new;
3905 			alloc_ftn = mono_object_new_specific;
3908 		if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3909 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3911 		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3914 	if (cfg->opt & MONO_OPT_SHARED) {
3915 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3916 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3918 		alloc_ftn = mono_object_new;
3919 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3920 		/* This happens often in argument checking code, eg. throw new FooException... */
3921 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3922 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3923 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3925 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3926 		MonoMethod *managed_alloc = NULL;
/* Class failed to load: record a TypeLoadException on the cfg (caller checks). */
3930 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3931 			cfg->exception_ptr = klass;
3935 #ifndef MONO_CROSS_COMPILE
3936 		managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3939 		if (managed_alloc) {
3940 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3941 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3943 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3945 			guint32 lw = vtable->klass->instance_size;
/* Pass the instance size in pointer-sized words when the allocator wants it. */
3946 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3947 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
3948 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3951 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3955 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3959  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the boxing of @val of type @klass. Nullable<T> boxes through
 * Nullable<T>.Box; gsharedvt types branch at runtime on the class box type
 * (1 == reference: return val as-is, 2 == nullable: call Box via the rgctx,
 * else: allocate and copy the vtype). Plain vtypes allocate an object and
 * store the value after the header. *out_cbb receives the final basic block.
 */
3962 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3964 	MonoInst *alloc, *ins;
3966 	*out_cbb = cfg->cbb;
3968 	if (mono_class_is_nullable (klass)) {
3969 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3972 			/* FIXME: What if the class is shared? We might not
3973 			   have to get the method address from the RGCTX. */
/* Shared code: indirect call through an rgctx-fetched code address. */
3974 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3975 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3976 			MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3978 			return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3980 			gboolean pass_vtable, pass_mrgctx;
3981 			MonoInst *rgctx_arg = NULL;
3983 			check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3984 			g_assert (!pass_mrgctx);
3987 				MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3990 				EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3993 			return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3997 	if (mini_is_gsharedvt_klass (cfg, klass)) {
3998 		MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3999 		MonoInst *res, *is_ref, *src_var, *addr;
4002 		dreg = alloc_ireg (cfg);
4004 		NEW_BBLOCK (cfg, is_ref_bb);
4005 		NEW_BBLOCK (cfg, is_nullable_bb);
4006 		NEW_BBLOCK (cfg, end_bb);
/* Branch on the runtime box kind: 1 == reference, 2 == nullable. */
4007 		is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4008 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4009 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4011 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4012 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough: plain vtype — allocate and copy the value after the header. */
4015 		alloc = handle_alloc (cfg, klass, TRUE, context_used);
4018 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4019 		ins->opcode = OP_STOREV_MEMBASE;
4021 		EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4022 		res->type = STACK_OBJ;
4024 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference-type case: boxing a reference is the identity; load it back out of the vtype storage. */
4027 		MONO_START_BB (cfg, is_ref_bb);
4028 		addr_reg = alloc_ireg (cfg);
4030 		/* val is a vtype, so has to load the value manually */
4031 		src_var = get_vreg_to_inst (cfg, val->dreg);
4033 			src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4034 		EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4035 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4036 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call Nullable<T>.Box via the rgctx. */
4039 		MONO_START_BB (cfg, is_nullable_bb);
4042 			MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4043 															MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4045 			MonoMethodSignature *box_sig;
4048 			 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4049 			 * construct that method at JIT time, so have to do things by hand.
4051 			box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4052 			box_sig->ret = &mono_defaults.object_class->byval_arg;
4053 			box_sig->param_count = 1;
4054 			box_sig->params [0] = &klass->byval_arg;
4055 			box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4056 			EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4057 			res->type = STACK_OBJ;
4061 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4063 		MONO_START_BB (cfg, end_bb);
4065 		*out_cbb = cfg->cbb;
/* Non-gsharedvt vtype: allocate, then store the value after the object header. */
4069 		alloc = handle_alloc (cfg, klass, TRUE, context_used);
4073 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether @klass is a generic instance (or, in shared code, an open
 * generic) with at least one covariant/contravariant type parameter that is
 * instantiated with a reference type. Such casts need the slow cache-based
 * variance-aware path instead of a simple class compare.
 */
4080 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4083 	MonoGenericContainer *container;
4084 	MonoGenericInst *ginst;
4086 	if (klass->generic_class) {
/* Closed generic instance: inspect the actual type arguments. */
4087 		container = klass->generic_class->container_class->generic_container;
4088 		ginst = klass->generic_class->context.class_inst;
4089 	} else if (klass->generic_container && context_used) {
/* Open generic in shared code: inspect the generic parameters themselves. */
4090 		container = klass->generic_container;
4091 		ginst = container->context.class_inst;
4096 	for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters matter for cast variance. */
4098 		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4100 		type = ginst->type_argv [i];
4101 		if (mini_type_is_reference (cfg, type))
/* True when isinst/castclass against @klass cannot be done with a simple class/vtable compare
 * (interfaces, arrays, nullables, MBR proxies, sealed classes, and open type variables). */
4107 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper using the
 * prepared @args (obj, klass, cache slot), recording cast details around it.
 * *out_bblock receives the basic block current after the call.
 */
4110 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4112 	MonoMethod *mono_castclass;
4115 	mono_castclass = mono_marshal_get_castclass_with_cache ();
4117 	save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4118 	res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4119 	reset_cast_details (cfg);
4120 	*out_bblock = cfg->cbb;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-shared-code front end for the cached castclass wrapper: builds the
 * (obj, klass, cache) argument triple with a compile-time klass constant and
 * a per-call-site cache slot (a patchable AOT constant, or a domain-allocated
 * pointer-sized slot when JITing), then delegates to emit_castclass_with_cache.
 */
4126 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4135 	EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4138 	if (cfg->compile_aot) {
4139 		/* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4140 		cfg->castclass_cache_index ++;
4141 		idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4142 		EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4144 		EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4147 	/*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4149 	return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4153  * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IL 'castclass' of @src to @klass: returns @src unchanged when the
 * cast succeeds (null always passes), throws InvalidCastException otherwise.
 * Chooses among the cached wrapper (variant generics / complex types), an
 * inlined marshal wrapper (interfaces, MBR classes in non-shared code), and
 * inline vtable/class compares. *out_bb receives the final basic block and
 * *inline_costs is incremented by the cost of any inlining done.
 */
4156 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4158 	MonoBasicBlock *is_null_bb;
4159 	int obj_reg = src->dreg;
4160 	int vtable_reg = alloc_preg (cfg);
4162 	MonoInst *klass_inst = NULL, *res;
4163 	MonoBasicBlock *bblock;
4167 	context_used = mini_class_check_context_used (cfg, klass);
/* Variant generic target in non-shared code: must go through the cached wrapper. */
4169 	if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4170 		res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4171 		(*inline_costs) += 2;
4174 	} else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
/* Interface/MBR cast: inline the generated castclass marshal wrapper. */
4175 		MonoMethod *mono_castclass;
4176 		MonoInst *iargs [1];
4179 		mono_castclass = mono_marshal_get_castclass (klass);
4182 		save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4183 		costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4184 							   iargs, ip, cfg->real_offset, TRUE, &bblock);
4185 		reset_cast_details (cfg);
4186 		CHECK_CFG_EXCEPTION;
4187 		g_assert (costs > 0);
4189 		cfg->real_offset += 5;
4191 		(*inline_costs) += costs;
/* Shared-code path with a cache entry: (obj, cached klass, cache ptr) -> wrapper. */
4200 		if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4201 			MonoInst *cache_ins;
4203 			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4208 			/* klass - it's the second element of the cache entry*/
4209 			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4212 			args [2] = cache_ins;
4214 			return emit_castclass_with_cache (cfg, klass, args, out_bb);
4217 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: null always passes the cast. */
4220 	NEW_BBLOCK (cfg, is_null_bb);
4222 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4223 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4225 	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4227 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4228 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4229 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4231 		int klass_reg = alloc_preg (cfg);
4233 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: an exact vtable/class compare suffices. */
4235 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4236 			/* the remoting code is broken, access the class for now */
4237 			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4238 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4240 					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4241 					cfg->exception_ptr = klass;
4244 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4246 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4247 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4249 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4251 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* General case: walk the class hierarchy. */
4252 			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4256 	MONO_START_BB (cfg, is_null_bb);
4258 	reset_cast_details (cfg);
4269  * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IL 'isinst' of @src against @klass: produces @src when the object is
 * an instance (null maps to null), NULL otherwise. Complex/variant targets go
 * through the cached isinst wrapper; the inline path dispatches on interface,
 * array, nullable and sealed-class shapes, funneling matches to is_null_bb
 * (which just forwards res_reg) and mismatches to false_bb (res_reg = 0).
 */
4272 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4275 	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4276 	int obj_reg = src->dreg;
4277 	int vtable_reg = alloc_preg (cfg);
4278 	int res_reg = alloc_ireg_ref (cfg);
4279 	MonoInst *klass_inst = NULL;
/* Complex target: call the isinst-with-cache wrapper with (obj, klass, cache). */
4284 	if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4285 		MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4286 		MonoInst *cache_ins;
4288 		cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4293 		/* klass - it's the second element of the cache entry*/
4294 		EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4297 		args [2] = cache_ins;
4299 		return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4302 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4305 	NEW_BBLOCK (cfg, is_null_bb);
4306 	NEW_BBLOCK (cfg, false_bb);
4307 	NEW_BBLOCK (cfg, end_bb);
4309 	/* Do the assignment at the beginning, so the other assignment can be if converted */
4310 	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4311 	ins->type = STACK_OBJ;
/* Null input: isinst of null yields null (res_reg already holds it). */
4314 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4315 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4317 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4319 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4320 		g_assert (!context_used);
4321 		/* the is_null_bb target simply copies the input register to the output */
4322 		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4324 		int klass_reg = alloc_preg (cfg);
/* Array target: match the rank, then check the element class. */
4327 			int rank_reg = alloc_preg (cfg);
4328 			int eclass_reg = alloc_preg (cfg);
4330 			g_assert (!context_used);
4331 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4332 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4333 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4334 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4335 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-cased element classes below encode array covariance rules for object/enum. */
4336 			if (klass->cast_class == mono_defaults.object_class) {
4337 				int parent_reg = alloc_preg (cfg);
4338 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4339 				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4340 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4341 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4342 			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
4343 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4344 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4345 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4346 			} else if (klass->cast_class == mono_defaults.enum_class) {
4347 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4348 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4349 			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4350 				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4352 				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4353 					/* Check that the object is a vector too */
4354 					int bounds_reg = alloc_preg (cfg);
4355 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4356 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4357 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4360 				/* the is_null_bb target simply copies the input register to the output */
4361 				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4363 		} else if (mono_class_is_nullable (klass)) {
4364 			g_assert (!context_used);
4365 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4366 			/* the is_null_bb target simply copies the input register to the output */
/* A boxed Nullable<T> is a boxed T at runtime, so test against the underlying type. */
4367 			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4369 			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4370 				g_assert (!context_used);
4371 				/* the remoting code is broken, access the class for now */
4372 				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4373 					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4375 						mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4376 						cfg->exception_ptr = klass;
4379 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4381 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4382 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4384 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4385 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4387 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4388 				/* the is_null_bb target simply copies the input register to the output */
4389 				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false_bb: not an instance — the result is NULL. */
4394 	MONO_START_BB (cfg, false_bb);
4396 	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4397 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4399 	MONO_START_BB (cfg, is_null_bb);
4401 	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Implement the CEE_MONO_CISINST opcode (isinst with transparent-proxy
 * awareness). Emits control flow yielding an I4 result per the table below;
 * when remoting is compiled out, proxy-related blocks are absent and hitting
 * a proxy aborts via g_error.
 */
4407 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4409 	/* This opcode takes as input an object reference and a class, and returns:
4410 	0) if the object is an instance of the class,
4411 	1) if the object is not instance of the class,
4412 	2) if the object is a proxy whose type cannot be determined */
4415 #ifndef DISABLE_REMOTING
4416 	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4418 	MonoBasicBlock *true_bb, *false_bb, *end_bb;
4420 	int obj_reg = src->dreg;
4421 	int dreg = alloc_ireg (cfg);
4423 #ifndef DISABLE_REMOTING
4424 	int klass_reg = alloc_preg (cfg);
4427 	NEW_BBLOCK (cfg, true_bb);
4428 	NEW_BBLOCK (cfg, false_bb);
4429 	NEW_BBLOCK (cfg, end_bb);
4430 #ifndef DISABLE_REMOTING
4431 	NEW_BBLOCK (cfg, false2_bb);
4432 	NEW_BBLOCK (cfg, no_proxy_bb);
/* Null is "not an instance" (result 1). */
4435 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4436 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4438 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4439 #ifndef DISABLE_REMOTING
4440 		NEW_BBLOCK (cfg, interface_fail_bb);
4443 		tmp_reg = alloc_preg (cfg);
4444 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4445 #ifndef DISABLE_REMOTING
/* Interface check failed: could still be a proxy with custom type info. */
4446 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4447 		MONO_START_BB (cfg, interface_fail_bb);
4448 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4450 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4452 		tmp_reg = alloc_preg (cfg);
4453 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* Proxy with custom type info -> undetermined (result 2). */
4454 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4455 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4457 		mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4460 #ifndef DISABLE_REMOTING
/* Non-interface target: detect proxies first, then do a normal hierarchy check. */
4461 		tmp_reg = alloc_preg (cfg);
4462 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4463 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4465 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4466 		tmp_reg = alloc_preg (cfg);
/* For a proxy, test against the remote class's proxy_class instead. */
4467 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4468 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4470 		tmp_reg = alloc_preg (cfg);
4471 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4472 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4473 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4475 		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4476 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4478 		MONO_START_BB (cfg, no_proxy_bb);
4480 		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4482 		g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result materialization: 1 = not an instance, 2 = undetermined proxy, 0 = instance. */
4486 	MONO_START_BB (cfg, false_bb);
4488 	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4489 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4491 #ifndef DISABLE_REMOTING
4492 	MONO_START_BB (cfg, false2_bb);
4494 	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4495 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4498 	MONO_START_BB (cfg, true_bb);
4500 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4502 	MONO_START_BB (cfg, end_bb);
/* Pseudo-instruction carrying dreg as an I4 stack value. */
4505 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4507 	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Implement the CEE_MONO_CCASTCLASS opcode (castclass with transparent-proxy
 * awareness). Emits control flow yielding an I4 result per the table below;
 * a definite mismatch throws InvalidCastException inline. With remoting
 * compiled out the proxy handling is absent and hitting it aborts via g_error.
 */
4513 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4515 	/* This opcode takes as input an object reference and a class, and returns:
4516 	0) if the object is an instance of the class,
4517 	1) if the object is a proxy whose type cannot be determined
4518 	an InvalidCastException exception is thrown otherwhise*/
4521 #ifndef DISABLE_REMOTING
4522 	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4524 	MonoBasicBlock *ok_result_bb;
4526 	int obj_reg = src->dreg;
4527 	int dreg = alloc_ireg (cfg);
4528 	int tmp_reg = alloc_preg (cfg);
4530 #ifndef DISABLE_REMOTING
4531 	int klass_reg = alloc_preg (cfg);
4532 	NEW_BBLOCK (cfg, end_bb);
4535 	NEW_BBLOCK (cfg, ok_result_bb);
/* Null always passes the cast (result 0). */
4537 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4538 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4540 	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4542 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4543 #ifndef DISABLE_REMOTING
4544 		NEW_BBLOCK (cfg, interface_fail_bb);
4546 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a proxy with custom type info may still pass. */
4547 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4548 		MONO_START_BB (cfg, interface_fail_bb);
4549 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Throws InvalidCastException if the object is not a transparent proxy. */
4551 		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4553 		tmp_reg = alloc_preg (cfg);
4554 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4555 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4556 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: undetermined (result 1). */
4558 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4559 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4561 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4562 		mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4563 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4566 #ifndef DISABLE_REMOTING
/* Non-interface target: detect proxies first, then do a normal castclass. */
4567 		NEW_BBLOCK (cfg, no_proxy_bb);
4569 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4570 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4571 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4573 		tmp_reg = alloc_preg (cfg);
/* For a proxy, test against the remote class's proxy_class instead. */
4574 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4575 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4577 		tmp_reg = alloc_preg (cfg);
4578 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4579 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4580 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4582 		NEW_BBLOCK (cfg, fail_1_bb);
4584 		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4586 		MONO_START_BB (cfg, fail_1_bb);
/* Hierarchy check failed on a custom-typed proxy: undetermined (result 1). */
4588 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4589 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4591 		MONO_START_BB (cfg, no_proxy_bb);
/* Regular object: throws InvalidCastException on mismatch. */
4593 		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4595 		g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4599 	MONO_START_BB (cfg, ok_result_bb);
4601 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4603 #ifndef DISABLE_REMOTING
4604 	MONO_START_BB (cfg, end_bb);
/* Pseudo-instruction carrying dreg as an I4 stack value. */
4608 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4610 	ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor: allocate the delegate object for
 * KLASS, store its target/method fields, and install the (possibly AOT'd)
 * delegate trampoline. CONTEXT_USED is nonzero under generic sharing, in
 * which case the concrete method is resolved through the RGCTX.
 * NOTE(review): lines are elided in this paste; code kept byte-identical.
 */
4616 * Returns NULL and set the cfg exception on error.
4618 static G_GNUC_UNUSED MonoInst*
4619 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4623 gpointer trampoline;
4624 MonoInst *obj, *method_ins, *tramp_ins;
4628 // FIXME reenable optimisation for virtual case
4633 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4636 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4640 obj = handle_alloc (cfg, klass, FALSE, 0);
4644 /* Inline the contents of mono_delegate_ctor */
4646 /* Set target field */
4647 /* Optimize away setting of NULL target */
4648 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4649 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference store into a heap object: notify the GC */
4650 if (cfg->gen_write_barriers) {
4651 dreg = alloc_preg (cfg);
4652 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4653 emit_write_barrier (cfg, ptr, target);
4657 /* Set method field */
4658 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4659 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4662 * To avoid looking up the compiled code belonging to the target method
4663 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4664 * store it, and we fill it after the method has been compiled.
4666 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4667 MonoInst *code_slot_ins;
4670 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The code-slot hash is per-domain shared state, guard it with the domain lock */
4672 domain = mono_domain_get ();
4673 mono_domain_lock (domain);
4674 if (!domain_jit_info (domain)->method_code_hash)
4675 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4676 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4678 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4679 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4681 mono_domain_unlock (domain);
4683 if (cfg->compile_aot)
4684 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4686 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: emit a patchable trampoline constant; JIT: create it now */
4691 if (cfg->compile_aot) {
4692 MonoDelegateClassMethodPair *del_tramp;
4694 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4695 del_tramp->klass = klass;
4696 del_tramp->method = context_used ? NULL : method;
4697 del_tramp->virtual = virtual;
4698 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4701 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4703 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4704 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4707 /* Set invoke_impl field */
4709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4711 dreg = alloc_preg (cfg);
4712 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4713 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4715 dreg = alloc_preg (cfg);
4716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4717 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4720 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
4726 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4728 MonoJitICallInfo *info;
4730 /* Need to register the icall so it gets an icall wrapper */
4731 info = mono_get_array_new_va_icall (rank);
4733 cfg->flags |= MONO_CFG_HAS_VARARGS;
4735 /* mono_array_new_va () needs a vararg calling convention */
4736 cfg->disable_llvm = TRUE;
4738 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4739 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4743 mono_emit_load_got_addr (MonoCompile *cfg)
4745 MonoInst *getaddr, *dummy_use;
4747 if (!cfg->got_var || cfg->got_var_allocated)
4750 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4751 getaddr->cil_code = cfg->header->code;
4752 getaddr->dreg = cfg->got_var->dreg;
4754 /* Add it to the start of the first bblock */
4755 if (cfg->bb_entry->code) {
4756 getaddr->next = cfg->bb_entry->code;
4757 cfg->bb_entry->code = getaddr;
4760 MONO_ADD_INS (cfg->bb_entry, getaddr);
4762 cfg->got_var_allocated = TRUE;
4765 * Add a dummy use to keep the got_var alive, since real uses might
4766 * only be generated by the back ends.
4767 * Add it to end_bblock, so the variable's lifetime covers the whole
4769 * It would be better to make the usage of the got var explicit in all
4770 * cases when the backend needs it (i.e. calls, throw etc.), so this
4771 * wouldn't be needed.
4773 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4774 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size threshold in IL bytes: read once from the MONO_INLINELIMIT
 * environment variable, falling back to INLINE_LENGTH_LIMIT; the _inited
 * flag makes the env lookup happen only on the first inlining check. */
4777 static int inline_limit;
4778 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG: filters out disabled/shared/deep-recursion cases, methods
 * flagged NOINLINING/SYNCHRONIZED or MarshalByRef, bodies over the size
 * limit, classes whose cctor cannot be run eagerly, methods with declarative
 * security, and (on soft-float targets) R4 signatures.
 * NOTE(review): lines are elided in this paste; code kept byte-identical.
 */
4781 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4783 MonoMethodHeaderSummary header;
4785 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4786 MonoMethodSignature *sig = mono_method_signature (method);
4790 if (cfg->disable_inline)
4792 if (cfg->generic_sharing_context)
4795 if (cfg->inline_depth > 10)
4798 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): this branch references `signature` while the declaration
 * above is named `sig` — confirm the elided lines declare `signature`. */
4799 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4800 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4801 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4806 if (!mono_method_get_header_summary (method, &header))
4809 /*runtime, icall and pinvoke are checked by summary call*/
4810 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4811 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4812 (mono_class_is_marshalbyref (method->klass)) ||
4816 /* also consider num_locals? */
4817 /* Do the size check early to avoid creating vtables */
4818 if (!inline_limit_inited) {
4819 if (g_getenv ("MONO_INLINELIMIT"))
4820 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4822 inline_limit = INLINE_LENGTH_LIMIT;
4823 inline_limit_inited = TRUE;
4825 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4829 * if we can initialize the class of the method right away, we do,
4830 * otherwise we don't allow inlining if the class needs initialization,
4831 * since it would mean inserting a call to mono_runtime_class_init()
4832 * inside the inlined code
4834 if (!(cfg->opt & MONO_OPT_SHARED)) {
4835 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4836 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4837 vtable = mono_class_vtable (cfg->domain, method->klass);
4840 if (!cfg->compile_aot)
4841 mono_runtime_class_init (vtable);
4842 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4843 if (cfg->run_cctors && method->klass->has_cctor) {
4844 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4845 if (!method->klass->runtime_info)
4846 /* No vtable created yet */
4848 vtable = mono_class_vtable (cfg->domain, method->klass);
4851 /* This makes so that inline cannot trigger */
4852 /* .cctors: too many apps depend on them */
4853 /* running with a specific order... */
4854 if (! vtable->initialized)
4856 mono_runtime_class_init (vtable);
4858 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4859 if (!method->klass->runtime_info)
4860 /* No vtable created yet */
4862 vtable = mono_class_vtable (cfg->domain, method->klass);
4865 if (!vtable->initialized)
4870 * If we're compiling for shared code
4871 * the cctor will need to be run at aot method load time, for example,
4872 * or at the end of the compilation of the inlining method.
4874 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4879 * CAS - do not inline methods with declarative security
4880 * Note: this has to be before any possible return TRUE;
4882 if (mono_security_method_has_declsec (method))
4885 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float fallback cannot inline anything touching R4 values */
4886 if (mono_arch_is_soft_float ()) {
4888 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4890 for (i = 0; i < sig->param_count; ++i)
4891 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4896 if (g_list_find (cfg->dont_inline, method))
4903 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4905 if (!cfg->compile_aot) {
4907 if (vtable->initialized)
4911 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4912 if (cfg->method == method)
4916 if (!mono_class_needs_cctor_run (klass, method))
4919 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4920 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements are of type KLASS; BCHECK controls whether a
 * bounds check is emitted. Returns the address instruction (STACK_MP).
 * NOTE(review): lines are elided in this paste; code kept byte-identical.
 */
4927 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4931 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4934 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4937 mono_class_init (klass);
4938 size = mono_class_array_element_size (klass);
4941 mult_reg = alloc_preg (cfg);
4942 array_reg = arr->dreg;
4943 index_reg = index->dreg;
4945 #if SIZEOF_REGISTER == 8
4946 /* The array reg is 64 bits but the index reg is only 32 */
4947 if (COMPILE_LLVM (cfg)) {
4949 index2_reg = index_reg;
4951 index2_reg = alloc_preg (cfg);
4952 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4955 if (index->type == STACK_I8) {
4956 index2_reg = alloc_preg (cfg);
4957 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4959 index2_reg = index_reg;
4964 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* Fast path on x86/amd64: fold the scale into a single LEA for pow2 sizes */
4966 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4967 if (size == 1 || size == 2 || size == 4 || size == 8) {
4968 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4970 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4971 ins->klass = mono_class_get_element_class (klass);
4972 ins->type = STACK_MP;
4978 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: the element size is only known at runtime, fetch it via RGCTX */
4981 MonoInst *rgctx_ins;
4984 g_assert (cfg->generic_sharing_context);
4985 context_used = mini_class_check_context_used (cfg, klass);
4986 g_assert (context_used);
4987 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4988 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4990 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4992 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4993 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4994 ins->klass = mono_class_get_element_class (klass);
4995 ins->type = STACK_MP;
4996 MONO_ADD_INS (cfg->cbb, ins);
5001 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * two-dimensional array ARR of KLASS elements, including lower-bound
 * adjustment and range checks against the bounds descriptor. Only compiled
 * when the target has real multiply support (see the #ifndef above).
 * NOTE(review): lines are elided in this paste; code kept byte-identical.
 */
5003 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5005 int bounds_reg = alloc_preg (cfg);
5006 int add_reg = alloc_ireg_mp (cfg);
5007 int mult_reg = alloc_preg (cfg);
5008 int mult2_reg = alloc_preg (cfg);
5009 int low1_reg = alloc_preg (cfg);
5010 int low2_reg = alloc_preg (cfg);
5011 int high1_reg = alloc_preg (cfg);
5012 int high2_reg = alloc_preg (cfg);
5013 int realidx1_reg = alloc_preg (cfg);
5014 int realidx2_reg = alloc_preg (cfg);
5015 int sum_reg = alloc_preg (cfg);
5016 int index1, index2, tmpreg;
5020 mono_class_init (klass);
5021 size = mono_class_array_element_size (klass);
5023 index1 = index_ins1->dreg;
5024 index2 = index_ins2->dreg;
5026 #if SIZEOF_REGISTER == 8
5027 /* The array reg is 64 bits but the index reg is only 32 */
5028 if (COMPILE_LLVM (cfg)) {
5031 tmpreg = alloc_preg (cfg);
5032 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5034 tmpreg = alloc_preg (cfg);
5035 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5039 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5043 /* range checking */
5044 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5045 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: subtract the lower bound, then unsigned-compare with length */
5047 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5048 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5049 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5050 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5051 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5052 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5053 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: its bounds descriptor follows dimension 0's in memory */
5055 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5056 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5057 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5058 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5059 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5060 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5061 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Address = arr + (realidx1 * dim1_length + realidx2) * size + vector offset */
5063 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5064 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5065 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5066 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5067 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5069 ins->type = STACK_MP;
5071 MONO_ADD_INS (cfg->cbb, ins);
5078 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5082 MonoMethod *addr_method;
5085 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5088 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
5090 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5091 /* emit_ldelema_2 depends on OP_LMUL */
5092 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
5093 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
5097 element_size = mono_class_array_element_size (cmethod->klass->element_class);
5098 addr_method = mono_marshal_get_array_address (rank, element_size);
5099 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
5104 static MonoBreakPolicy
5105 always_insert_breakpoint (MonoMethod *method)
5107 return MONO_BREAK_POLICY_ALWAYS;
/* Active breakpoint-policy callback; embedders replace it through
 * mono_set_break_policy (), the default honors every breakpoint. */
5110 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5113 * mono_set_break_policy:
5114 * policy_callback: the new callback function
5116 * Allow embedders to decide wherther to actually obey breakpoint instructions
5117 * (both break IL instructions and Debugger.Break () method calls), for example
5118 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5119 * untrusted or semi-trusted code.
5121 * @policy_callback will be called every time a break point instruction needs to
5122 * be inserted with the method argument being the method that calls Debugger.Break()
5123 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5124 * if it wants the breakpoint to not be effective in the given method.
5125 * #MONO_BREAK_POLICY_ALWAYS is the default.
5128 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5130 if (policy_callback)
5131 break_policy_func = policy_callback;
5133 break_policy_func = always_insert_breakpoint;
5137 should_insert_brekpoint (MonoMethod *method) {
5138 switch (break_policy_func (method)) {
5139 case MONO_BREAK_POLICY_ALWAYS:
5141 case MONO_BREAK_POLICY_NEVER:
5143 case MONO_BREAK_POLICY_ON_DBG:
5144 g_warning ("mdb no longer supported");
5147 g_warning ("Incorrect value returned from break policy callback");
5152 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
5154 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5156 MonoInst *addr, *store, *load;
5157 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5159 /* the bounds check is already done by the callers */
5160 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5162 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5163 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5164 if (mini_type_is_reference (cfg, fsig->params [2]))
5165 emit_write_barrier (cfg, addr, load);
5167 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5168 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
5175 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5177 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp[2] into element sp[1] of array sp[0] whose
 * elements have type KLASS. Reference-typed, non-NULL stores go through the
 * virtual stelemref helper (which performs the covariance check); value
 * stores are emitted inline, with an OP_ICONST fast path for constant
 * indexes. SAFETY_CHECKS controls bounds checking.
 * NOTE(review): lines are elided in this paste; code kept byte-identical.
 */
5181 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5183 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5184 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5185 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5186 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5187 MonoInst *iargs [3];
5190 mono_class_setup_vtable (obj_array);
5191 g_assert (helper->slot);
5193 if (sp [0]->type != STACK_OBJ)
5195 if (sp [2]->type != STACK_OBJ)
5202 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at compile time, store through ldelema */
5206 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5209 // FIXME-VT: OP_ICONST optimization
5210 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5211 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5212 ins->opcode = OP_STOREV_MEMBASE;
5213 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset into the store itself */
5214 int array_reg = sp [0]->dreg;
5215 int index_reg = sp [1]->dreg;
5216 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5219 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5220 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5222 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5223 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5224 if (generic_class_is_reference_type (cfg, klass))
5225 emit_write_barrier (cfg, addr, sp [2]);
5232 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5237 eklass = mono_class_from_mono_type (fsig->params [2]);
5239 eklass = mono_class_from_mono_type (fsig->ret);
5242 return emit_array_store (cfg, eklass, args, FALSE);
5244 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5245 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5251 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5255 //Only allow for valuetypes
5256 if (!param_klass->valuetype || !return_klass->valuetype)
5260 if (param_klass->has_references || return_klass->has_references)
5263 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5264 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5265 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5268 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5269 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5272 //And have the same size
5273 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
5279 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5281 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5282 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5284 //Valuetypes that are semantically equivalent
5285 if (is_unsafe_mov_compatible (param_klass, return_klass))
5288 //Arrays of valuetypes that are semantically equivalent
5289 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
5296 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5298 #ifdef MONO_ARCH_SIMD_INTRINSICS
5299 MonoInst *ins = NULL;
5301 if (cfg->opt & MONO_OPT_SIMD) {
5302 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5308 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5312 emit_memory_barrier (MonoCompile *cfg, int kind)
5314 MonoInst *ins = NULL;
5315 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5316 MONO_ADD_INS (cfg->cbb, ins);
5317 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   LLVM-backend-only intrinsics: Math.Sin/Cos/Sqrt/Abs become single R8
 * opcodes, and Math.Min/Max on I4/U4/I8/U8 become conditional-move min/max
 * opcodes when MONO_OPT_CMOV is enabled. Returns NULL if nothing matched.
 * NOTE(review): the opcode-assignment lines appear elided in this paste;
 * code lines are kept byte-identical.
 */
5323 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5325 MonoInst *ins = NULL;
5328 /* The LLVM backend supports these intrinsics */
5329 if (cmethod->klass == mono_defaults.math_class) {
5330 if (strcmp (cmethod->name, "Sin") == 0) {
5332 } else if (strcmp (cmethod->name, "Cos") == 0) {
5334 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5336 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary R8 intrinsic: one freg result, one input */
5341 MONO_INST_NEW (cfg, ins, opcode);
5342 ins->type = STACK_R8;
5343 ins->dreg = mono_alloc_freg (cfg);
5344 ins->sreg1 = args [0]->dreg;
5345 MONO_ADD_INS (cfg->cbb, ins);
5349 if (cfg->opt & MONO_OPT_CMOV) {
5350 if (strcmp (cmethod->name, "Min") == 0) {
5351 if (fsig->params [0]->type == MONO_TYPE_I4)
5353 if (fsig->params [0]->type == MONO_TYPE_U4)
5354 opcode = OP_IMIN_UN;
5355 else if (fsig->params [0]->type == MONO_TYPE_I8)
5357 else if (fsig->params [0]->type == MONO_TYPE_U8)
5358 opcode = OP_LMIN_UN;
5359 } else if (strcmp (cmethod->name, "Max") == 0) {
5360 if (fsig->params [0]->type == MONO_TYPE_I4)
5362 if (fsig->params [0]->type == MONO_TYPE_U4)
5363 opcode = OP_IMAX_UN;
5364 else if (fsig->params [0]->type == MONO_TYPE_I8)
5366 else if (fsig->params [0]->type == MONO_TYPE_U8)
5367 opcode = OP_LMAX_UN;
/* Binary min/max intrinsic: result width follows the operand type */
5372 MONO_INST_NEW (cfg, ins, opcode);
5373 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5374 ins->dreg = mono_alloc_ireg (cfg);
5375 ins->sreg1 = args [0]->dreg;
5376 ins->sreg2 = args [1]->dreg;
5377 MONO_ADD_INS (cfg->cbb, ins);
5385 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5387 if (cmethod->klass == mono_defaults.array_class) {
5388 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5389 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5390 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5391 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5392 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5393 return emit_array_unsafe_mov (cfg, fsig, args);
5400 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5402 MonoInst *ins = NULL;
5404 static MonoClass *runtime_helpers_class = NULL;
5405 if (! runtime_helpers_class)
5406 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5407 "System.Runtime.CompilerServices", "RuntimeHelpers");
5409 if (cmethod->klass == mono_defaults.string_class) {
5410 if (strcmp (cmethod->name, "get_Chars") == 0) {
5411 int dreg = alloc_ireg (cfg);
5412 int index_reg = alloc_preg (cfg);
5413 int mult_reg = alloc_preg (cfg);
5414 int add_reg = alloc_preg (cfg);
5416 #if SIZEOF_REGISTER == 8
5417 /* The array reg is 64 bits but the index reg is only 32 */
5418 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5420 index_reg = args [1]->dreg;
5422 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5424 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5425 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5426 add_reg = ins->dreg;
5427 /* Avoid a warning */
5429 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5433 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5434 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5435 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5437 type_from_op (ins, NULL, NULL);
5439 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5440 int dreg = alloc_ireg (cfg);
5441 /* Decompose later to allow more optimizations */
5442 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5443 ins->type = STACK_I4;
5444 ins->flags |= MONO_INST_FAULT;
5445 cfg->cbb->has_array_access = TRUE;
5446 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5449 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5450 int mult_reg = alloc_preg (cfg);
5451 int add_reg = alloc_preg (cfg);
5453 /* The corlib functions check for oob already. */
5454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5455 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5456 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5457 return cfg->cbb->last_ins;
5460 } else if (cmethod->klass == mono_defaults.object_class) {
5462 if (strcmp (cmethod->name, "GetType") == 0) {
5463 int dreg = alloc_ireg_ref (cfg);
5464 int vt_reg = alloc_preg (cfg);
5465 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5466 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5467 type_from_op (ins, NULL, NULL);
5470 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5471 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5472 int dreg = alloc_ireg (cfg);
5473 int t1 = alloc_ireg (cfg);
5475 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5476 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5477 ins->type = STACK_I4;
5481 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5482 MONO_INST_NEW (cfg, ins, OP_NOP);
5483 MONO_ADD_INS (cfg->cbb, ins);
5487 } else if (cmethod->klass == mono_defaults.array_class) {
5488 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5489 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5491 #ifndef MONO_BIG_ARRAYS
5493 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5496 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5497 int dreg = alloc_ireg (cfg);
5498 int bounds_reg = alloc_ireg_mp (cfg);
5499 MonoBasicBlock *end_bb, *szarray_bb;
5500 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5502 NEW_BBLOCK (cfg, end_bb);
5503 NEW_BBLOCK (cfg, szarray_bb);
5505 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5506 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5508 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5509 /* Non-szarray case */
5511 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5512 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5514 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5515 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5516 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5517 MONO_START_BB (cfg, szarray_bb);
5520 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5521 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5523 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5524 MONO_START_BB (cfg, end_bb);
5526 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5527 ins->type = STACK_I4;
5533 if (cmethod->name [0] != 'g')
5536 if (strcmp (cmethod->name, "get_Rank") == 0) {
5537 int dreg = alloc_ireg (cfg);
5538 int vtable_reg = alloc_preg (cfg);
5539 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5540 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5541 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5542 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5543 type_from_op (ins, NULL, NULL);
5546 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5547 int dreg = alloc_ireg (cfg);
5549 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5550 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5551 type_from_op (ins, NULL, NULL);
5556 } else if (cmethod->klass == runtime_helpers_class) {
5558 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5559 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5563 } else if (cmethod->klass == mono_defaults.thread_class) {
5564 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5565 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5566 MONO_ADD_INS (cfg->cbb, ins);
5568 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5569 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5571 } else if (cmethod->klass == mono_defaults.monitor_class) {
5572 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5573 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5576 if (COMPILE_LLVM (cfg)) {
5578 * Pass the argument normally, the LLVM backend will handle the
5579 * calling convention problems.
5581 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5583 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5584 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5585 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5586 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5589 return (MonoInst*)call;
5590 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
5591 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5594 if (COMPILE_LLVM (cfg)) {
5596 * Pass the argument normally, the LLVM backend will handle the
5597 * calling convention problems.
5599 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5601 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5602 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5603 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5604 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5607 return (MonoInst*)call;
5609 } else if (strcmp (cmethod->name, "Exit") == 0) {
5612 if (COMPILE_LLVM (cfg)) {
5613 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5615 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5616 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5617 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5618 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5621 return (MonoInst*)call;
5624 } else if (cmethod->klass->image == mono_defaults.corlib &&
5625 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5626 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5629 #if SIZEOF_REGISTER == 8
5630 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5633 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5635 /* 64 bit reads are already atomic */
5636 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5637 load_ins->dreg = mono_alloc_preg (cfg);
5638 load_ins->inst_basereg = args [0]->dreg;
5639 load_ins->inst_offset = 0;
5640 MONO_ADD_INS (cfg->cbb, load_ins);
5642 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5648 if (strcmp (cmethod->name, "Increment") == 0) {
5649 MonoInst *ins_iconst;
5652 if (fsig->params [0]->type == MONO_TYPE_I4) {
5653 opcode = OP_ATOMIC_ADD_I4;
5654 cfg->has_atomic_add_i4 = TRUE;
5656 #if SIZEOF_REGISTER == 8
5657 else if (fsig->params [0]->type == MONO_TYPE_I8)
5658 opcode = OP_ATOMIC_ADD_I8;
5661 if (!mono_arch_opcode_supported (opcode))
5663 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5664 ins_iconst->inst_c0 = 1;
5665 ins_iconst->dreg = mono_alloc_ireg (cfg);
5666 MONO_ADD_INS (cfg->cbb, ins_iconst);
5668 MONO_INST_NEW (cfg, ins, opcode);
5669 ins->dreg = mono_alloc_ireg (cfg);
5670 ins->inst_basereg = args [0]->dreg;
5671 ins->inst_offset = 0;
5672 ins->sreg2 = ins_iconst->dreg;
5673 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5674 MONO_ADD_INS (cfg->cbb, ins);
5676 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5677 MonoInst *ins_iconst;
5680 if (fsig->params [0]->type == MONO_TYPE_I4) {
5681 opcode = OP_ATOMIC_ADD_I4;
5682 cfg->has_atomic_add_i4 = TRUE;
5684 #if SIZEOF_REGISTER == 8
5685 else if (fsig->params [0]->type == MONO_TYPE_I8)
5686 opcode = OP_ATOMIC_ADD_I8;
5689 if (!mono_arch_opcode_supported (opcode))
5691 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5692 ins_iconst->inst_c0 = -1;
5693 ins_iconst->dreg = mono_alloc_ireg (cfg);
5694 MONO_ADD_INS (cfg->cbb, ins_iconst);
5696 MONO_INST_NEW (cfg, ins, opcode);
5697 ins->dreg = mono_alloc_ireg (cfg);
5698 ins->inst_basereg = args [0]->dreg;
5699 ins->inst_offset = 0;
5700 ins->sreg2 = ins_iconst->dreg;
5701 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5702 MONO_ADD_INS (cfg->cbb, ins);
5704 } else if (strcmp (cmethod->name, "Add") == 0) {
5707 if (fsig->params [0]->type == MONO_TYPE_I4) {
5708 opcode = OP_ATOMIC_ADD_I4;
5709 cfg->has_atomic_add_i4 = TRUE;
5711 #if SIZEOF_REGISTER == 8
5712 else if (fsig->params [0]->type == MONO_TYPE_I8)
5713 opcode = OP_ATOMIC_ADD_I8;
5716 if (!mono_arch_opcode_supported (opcode))
5718 MONO_INST_NEW (cfg, ins, opcode);
5719 ins->dreg = mono_alloc_ireg (cfg);
5720 ins->inst_basereg = args [0]->dreg;
5721 ins->inst_offset = 0;
5722 ins->sreg2 = args [1]->dreg;
5723 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5724 MONO_ADD_INS (cfg->cbb, ins);
5728 if (strcmp (cmethod->name, "Exchange") == 0) {
5730 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5732 if (fsig->params [0]->type == MONO_TYPE_I4) {
5733 opcode = OP_ATOMIC_EXCHANGE_I4;
5734 cfg->has_atomic_exchange_i4 = TRUE;
5736 #if SIZEOF_REGISTER == 8
5737 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5738 (fsig->params [0]->type == MONO_TYPE_I))
5739 opcode = OP_ATOMIC_EXCHANGE_I8;
5741 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5742 opcode = OP_ATOMIC_EXCHANGE_I4;
5743 cfg->has_atomic_exchange_i4 = TRUE;
5749 if (!mono_arch_opcode_supported (opcode))
5752 MONO_INST_NEW (cfg, ins, opcode);
5753 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5754 ins->inst_basereg = args [0]->dreg;
5755 ins->inst_offset = 0;
5756 ins->sreg2 = args [1]->dreg;
5757 MONO_ADD_INS (cfg->cbb, ins);
5759 switch (fsig->params [0]->type) {
5761 ins->type = STACK_I4;
5765 ins->type = STACK_I8;
5767 case MONO_TYPE_OBJECT:
5768 ins->type = STACK_OBJ;
5771 g_assert_not_reached ();
5774 if (cfg->gen_write_barriers && is_ref)
5775 emit_write_barrier (cfg, args [0], args [1]);
5778 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5780 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5781 if (fsig->params [1]->type == MONO_TYPE_I4)
5783 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5784 size = sizeof (gpointer);
5785 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5788 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5790 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5791 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5792 ins->sreg1 = args [0]->dreg;
5793 ins->sreg2 = args [1]->dreg;
5794 ins->sreg3 = args [2]->dreg;
5795 ins->type = STACK_I4;
5796 MONO_ADD_INS (cfg->cbb, ins);
5797 cfg->has_atomic_cas_i4 = TRUE;
5798 } else if (size == 8) {
5799 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5801 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5802 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5803 ins->sreg1 = args [0]->dreg;
5804 ins->sreg2 = args [1]->dreg;
5805 ins->sreg3 = args [2]->dreg;
5806 ins->type = STACK_I8;
5807 MONO_ADD_INS (cfg->cbb, ins);
5809 /* g_assert_not_reached (); */
5811 if (cfg->gen_write_barriers && is_ref)
5812 emit_write_barrier (cfg, args [0], args [1]);
5815 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5816 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5820 } else if (cmethod->klass->image == mono_defaults.corlib) {
5821 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5822 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5823 if (should_insert_brekpoint (cfg->method)) {
5824 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5826 MONO_INST_NEW (cfg, ins, OP_NOP);
5827 MONO_ADD_INS (cfg->cbb, ins);
5831 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5832 && strcmp (cmethod->klass->name, "Environment") == 0) {
5834 EMIT_NEW_ICONST (cfg, ins, 1);
5836 EMIT_NEW_ICONST (cfg, ins, 0);
5840 } else if (cmethod->klass == mono_defaults.math_class) {
5842 * There is general branches code for Min/Max, but it does not work for
5844 * http://everything2.com/?node_id=1051618
5846 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5847 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5849 MonoJumpInfoToken *ji;
5852 cfg->disable_llvm = TRUE;
5854 if (args [0]->opcode == OP_GOT_ENTRY) {
5855 pi = args [0]->inst_p1;
5856 g_assert (pi->opcode == OP_PATCH_INFO);
5857 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5860 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5861 ji = args [0]->inst_p0;
5864 NULLIFY_INS (args [0]);
5867 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5868 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5869 ins->dreg = mono_alloc_ireg (cfg);
5871 ins->inst_p0 = mono_string_to_utf8 (s);
5872 MONO_ADD_INS (cfg->cbb, ins);
5877 #ifdef MONO_ARCH_SIMD_INTRINSICS
5878 if (cfg->opt & MONO_OPT_SIMD) {
5879 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5885 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5889 if (COMPILE_LLVM (cfg)) {
5890 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5895 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
/*
 * mini_redirect_call:
 *
 *   Give the JIT a chance to replace a call to METHOD with specialized IR.
 *   Only managed string allocation is handled in the visible code: a call to
 *   String.InternalAllocateStr is redirected to the managed GC allocator when
 *   no allocation profiler is active and shared (domain-neutral) code is off.
 *   Presumably returns NULL when no redirection applies — TODO confirm; the
 *   fallback return path is among the lines missing from this extraction.
 *
 *   NOTE(review): SOURCE is a partial extraction; several original lines of
 *   this function (opening brace, #endif, closing returns) are not visible.
 */
5899 * This entry point could be used later for arbitrary method
5902 inline static MonoInst*
5903 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5904 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5906 if (method->klass == mono_defaults.string_class) {
5907 /* managed string allocation support */
/* Redirect only when nobody observes allocations and the code is not
 * compiled with MONO_OPT_SHARED, since the managed allocator is tied to
 * the current domain's vtable. */
5908 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5909 MonoInst *iargs [2];
5910 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5911 MonoMethod *managed_alloc = NULL;
5913 g_assert (vtable); /* Should not fail since it is System.String */
5914 #ifndef MONO_CROSS_COMPILE
/* A cross compiler cannot ask the target GC for its managed allocator. */
5915 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
5919 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5920 iargs [1] = args [0];
5921 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create an OP_LOCAL variable for each argument of the method being inlined
 *   (including the implicit 'this' when sig->hasthis) and emit stores of the
 *   stack values SP into them, so the inlined body can address its arguments
 *   as ordinary variables.  Populates cfg->args [i] as it goes.
 *
 *   NOTE(review): partial extraction — the return-type line, loop-variable
 *   declarations and closing braces are missing from this span.
 */
5928 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5930 MonoInst *store, *temp;
5933 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' (i == 0 with hasthis) the static signature has no entry, so the
 * type is recovered from the evaluation-stack value instead. */
5934 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5937 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5938 * would be different than the MonoInst's used to represent arguments, and
5939 * the ldelema implementation can't deal with that.
5940 * Solution: When ldelema is used on an inline argument, create a var for
5941 * it, emit ldelema on that var, and emit the saving code below in
5942 * inline_method () if needed.
5944 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5945 cfg->args [i] = temp;
5946 /* This uses cfg->args [i] which is set by the preceding line */
5947 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5948 store->cil_code = sp [0]->cil_code;
5953 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5954 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug filter for inlining: returns TRUE only when the full name of
 *   CALLED_METHOD starts with the prefix given in the environment variable
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  With the variable unset or empty
 *   the limit string is empty and (per the lines visible here) the prefix
 *   check is skipped — presumably allowing all methods; TODO confirm against
 *   the missing fallback lines.  The env lookup is cached in a static, so
 *   this is not thread-safe on first use (debug-only code).
 */
5956 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5958 check_inline_called_method_name_limit (MonoMethod *called_method)
5961 static const char *limit = NULL;
5963 if (limit == NULL) {
5964 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5966 if (limit_string != NULL)
5967 limit = limit_string;
5972 if (limit [0] != '\0') {
5973 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars are compared. */
5975 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5976 g_free (called_method_name);
5978 //return (strncmp_result <= 0);
5979 return (strncmp_result == 0);
/*
 * check_inline_caller_method_name_limit:
 *
 *   Mirror of check_inline_called_method_name_limit () but filtering on the
 *   CALLER: returns TRUE only when CALLER_METHOD's full name starts with the
 *   prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.  Same static-cache and
 *   thread-safety caveats apply (debug-only code).
 */
5986 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5988 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5991 static const char *limit = NULL;
5993 if (limit == NULL) {
5994 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5995 if (limit_string != NULL) {
5996 limit = limit_string;
6002 if (limit [0] != '\0') {
6003 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison against the configured limit. */
6005 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6006 g_free (caller_method_name);
6008 //return (strncmp_result <= 0);
6009 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes virtual register DREG to the zero value of
 *   RTYPE: NULL for references/pointers, 0 for integer types, 0.0 for
 *   floats (loaded from a shared static r8_0 constant), and VZERO for
 *   value types (including generic instances and type variables known to
 *   be valuetypes).  The final PCONST is the fallback for everything else.
 *
 *   NOTE(review): partial extraction — the byref/pointer branch head and
 *   some brace lines are missing between the visible lines.
 */
6017 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6019 static double r8_0 = 0.0;
6023 rtype = mini_replace_type (rtype);
6027 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6028 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6029 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6030 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6031 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6032 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
/* Both R4 and R8 are initialized with an R8 zero constant here. */
6033 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6034 ins->type = STACK_R8;
6035 ins->inst_p0 = (void*)&r8_0;
6037 MONO_ADD_INS (cfg->cbb, ins);
6038 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6039 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6040 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6041 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6042 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6044 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Same type dispatch as emit_init_rvar () but emits OP_DUMMY_* opcodes,
 *   which keep the IR well formed (the register is formally defined) without
 *   generating real initialization code.  Falls back to a real
 *   emit_init_rvar () for types without a dummy opcode.
 */
6049 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6053 rtype = mini_replace_type (rtype);
6057 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6058 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6059 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6060 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6061 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6062 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6063 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6064 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6065 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6066 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6067 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6068 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode fits: emit a real zero initialization instead. */
6070 emit_init_rvar (cfg, dreg, rtype);
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of type TYPE.  With INIT true, real zeroing is
 *   emitted via emit_init_rvar (); otherwise dummy init statements keep the
 *   IR valid (see emit_dummy_init_rvar).  Under soft-float the value is
 *   materialized in a fresh register and stored through LOCSTORE so the
 *   soft-float decomposition sees a proper store.
 */
6074 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6076 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6078 MonoInst *var = cfg->locals [local];
6079 if (COMPILE_SOFT_FLOAT (cfg)) {
6081 int reg = alloc_dreg (cfg, var->type);
6082 emit_init_rvar (cfg, reg, type);
/* Store the freshly-emitted init value (last ins of the bblock) into the local. */
6083 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6086 emit_init_rvar (cfg, var->dreg, type);
6088 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at IP, consuming its arguments from SP.  Returns
 *   the cost of the inline (per the header comment below; the exact return
 *   values on the failure paths fall in lines missing from this extraction).
 *
 *   Mechanism: save every piece of per-method translation state hanging off
 *   CFG (locals, args, cil offsets, cbb hash, current method, generic
 *   context, ...), create fresh start/end bblocks, recursively run
 *   mono_method_to_ir () on CMETHOD, then restore all saved state.  If the
 *   recursive translation came back cheap enough (costs < 60) or
 *   INLINE_ALWAYS is set, the new bblocks are linked/merged into the caller
 *   graph; otherwise the inline is aborted and cfg->cbb is reset so the new
 *   bblocks are dropped.
 *
 *   The save/restore pairs and the merge logic are order-sensitive; do not
 *   reorder.  NOTE(review): partial extraction — early-return bodies,
 *   several braces and some declarations (costs, i, bb) are missing here.
 */
6095 * Return the cost of inlining CMETHOD.
6098 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6099 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6101 MonoInst *ins, *rvar = NULL;
6102 MonoMethodHeader *cheader;
6103 MonoBasicBlock *ebblock, *sbblock;
6105 MonoMethod *prev_inlined_method;
6106 MonoInst **prev_locals, **prev_args;
6107 MonoType **prev_arg_types;
6108 guint prev_real_offset;
6109 GHashTable *prev_cbb_hash;
6110 MonoBasicBlock **prev_cil_offset_to_bb;
6111 MonoBasicBlock *prev_cbb;
6112 unsigned char* prev_cil_start;
6113 guint32 prev_cil_offset_to_bb_len;
6114 MonoMethod *prev_current_method;
6115 MonoGenericContext *prev_generic_context;
6116 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6118 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on which methods may be inlined (env-var driven). */
6120 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6121 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6124 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6125 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6129 if (cfg->verbose_level > 2)
6130 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6132 if (!cmethod->inline_info) {
6133 cfg->stat_inlineable_methods++;
6134 cmethod->inline_info = 1;
6137 /* allocate local variables */
6138 cheader = mono_method_get_header (cmethod);
6140 if (cheader == NULL || mono_loader_get_last_error ()) {
6141 MonoLoaderError *error = mono_loader_get_last_error ();
6144 mono_metadata_free_mh (cheader);
6145 if (inline_always && error)
6146 mono_cfg_set_exception (cfg, error->exception_type);
6148 mono_loader_clear_error ();
6152 /*Must verify before creating locals as it can cause the JIT to assert.*/
6153 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6154 mono_metadata_free_mh (cheader);
6158 /* allocate space to store the return value */
6159 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6160 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array for the inlined body. */
6163 prev_locals = cfg->locals;
6164 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6165 for (i = 0; i < cheader->num_locals; ++i)
6166 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6168 /* allocate start and end blocks */
6169 /* This is needed so if the inline is aborted, we can clean up */
6170 NEW_BBLOCK (cfg, sbblock);
6171 sbblock->real_offset = real_offset;
6173 NEW_BBLOCK (cfg, ebblock);
6174 ebblock->block_num = cfg->num_bblocks++;
6175 ebblock->real_offset = real_offset;
/* Save the remaining per-method translation state before recursing. */
6177 prev_args = cfg->args;
6178 prev_arg_types = cfg->arg_types;
6179 prev_inlined_method = cfg->inlined_method;
6180 cfg->inlined_method = cmethod;
6181 cfg->ret_var_set = FALSE;
6182 cfg->inline_depth ++;
6183 prev_real_offset = cfg->real_offset;
6184 prev_cbb_hash = cfg->cbb_hash;
6185 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6186 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6187 prev_cil_start = cfg->cil_start;
6188 prev_cbb = cfg->cbb;
6189 prev_current_method = cfg->current_method;
6190 prev_generic_context = cfg->generic_context;
6191 prev_ret_var_set = cfg->ret_var_set;
6192 prev_disable_inline = cfg->disable_inline;
/* A callvirt on an instance method needs a null check on 'this'. */
6194 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6197 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6199 ret_var_set = cfg->ret_var_set;
/* Restore everything saved above, in kind. */
6201 cfg->inlined_method = prev_inlined_method;
6202 cfg->real_offset = prev_real_offset;
6203 cfg->cbb_hash = prev_cbb_hash;
6204 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6205 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6206 cfg->cil_start = prev_cil_start;
6207 cfg->locals = prev_locals;
6208 cfg->args = prev_args;
6209 cfg->arg_types = prev_arg_types;
6210 cfg->current_method = prev_current_method;
6211 cfg->generic_context = prev_generic_context;
6212 cfg->ret_var_set = prev_ret_var_set;
6213 cfg->disable_inline = prev_disable_inline;
6214 cfg->inline_depth --;
/* Accept the inline when it was cheap, or when forced. */
6216 if ((costs >= 0 && costs < 60) || inline_always) {
6217 if (cfg->verbose_level > 2)
6218 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6220 cfg->stat_inlined_methods++;
6222 /* always add some code to avoid block split failures */
6223 MONO_INST_NEW (cfg, ins, OP_NOP);
6224 MONO_ADD_INS (prev_cbb, ins);
6226 prev_cbb->next_bb = sbblock;
6227 link_bblock (cfg, prev_cbb, sbblock);
6230 * Get rid of the begin and end bblocks if possible to aid local
6233 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6235 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6236 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6238 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6239 MonoBasicBlock *prev = ebblock->in_bb [0];
6240 mono_merge_basic_blocks (cfg, prev, ebblock);
6242 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6243 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6244 cfg->cbb = prev_cbb;
6248 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessor paths where it was never assigned. */
6254 for (i = 0; i < ebblock->in_count; ++i) {
6255 bb = ebblock->in_bb [i];
6257 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6260 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6268 *out_cbb = cfg->cbb;
6272 * If the inlined method contains only a throw, then the ret var is not
6273 * set, so set it to a dummy value.
6276 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the return value of the inlined method onto the evaluation stack. */
6278 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6281 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: discard partial state and reset the current bblock. */
6284 if (cfg->verbose_level > 2)
6285 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6286 cfg->exception_type = MONO_EXCEPTION_NONE;
6287 mono_loader_clear_error ();
6289 /* This gets rid of the newly added bblocks */
6290 cfg->cbb = prev_cbb;
6292 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6297 * Some of these comments may well be out-of-date.
6298 * Design decisions: we do a single pass over the IL code (and we do bblock
6299 * splitting/merging in the few cases when it's required: a back jump to an IL
6300 * address that was not already seen as bblock starting point).
6301 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6302 * Complex operations are decomposed in simpler ones right away. We need to let the
6303 * arch-specific code peek and poke inside this process somehow (except when the
6304 * optimizations can take advantage of the full semantic info of coarse opcodes).
6305 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6306 * MonoInst->opcode initially is the IL opcode or some simplification of that
6307 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6308 * opcode with value bigger than OP_LAST.
6309 * At this point the IR can be handed over to an interpreter, a dumb code generator
6310 * or to the optimizing code generator that will translate it to SSA form.
6312 * Profiling directed optimizations.
6313 * We may compile by default with few or no optimizations and instrument the code
6314 * or the user may indicate what methods to optimize the most either in a config file
6315 * or through repeated runs where the compiler applies offline the optimizations to
6316 * each method and then decides if it was worth it.
6319 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6320 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6321 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6322 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6323 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6324 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6325 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6326 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6328 /* offset from br.s -> br like opcodes */
6329 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to bblock BB, i.e. no
 *   other bblock starts at IP.  Used to decide if consecutive IL
 *   instructions can be peephole-combined without crossing a block boundary.
 */
6332 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6334 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
/* NULL means no bblock starts here, so IP is inside the current one. */
6336 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-scan the IL stream [START, END) and create a basic block (via
 *   GET_BBLOCK) at every branch target and at the instruction following each
 *   branch/switch, so the main translation loop can split blocks correctly.
 *   Also marks the bblock containing a CEE_THROW as out-of-line (cold).
 *
 *   NOTE(review): partial extraction — the operand-size advance statements
 *   for most MonoInline* cases, break statements and the surrounding while
 *   loop are among the lines missing from this span.
 */
6340 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6342 unsigned char *ip = start;
6343 unsigned char *target;
6346 MonoBasicBlock *bblock;
6347 const MonoOpcode *opcode;
6350 cli_addr = ip - start;
6351 i = mono_opcode_value ((const guint8 **)&ip, end);
6354 opcode = &mono_opcodes [i];
/* Dispatch on the operand encoding to find the next instruction and any
 * branch targets. */
6355 switch (opcode->argument) {
6356 case MonoInlineNone:
6359 case MonoInlineString:
6360 case MonoInlineType:
6361 case MonoInlineField:
6362 case MonoInlineMethod:
6365 case MonoShortInlineR:
6372 case MonoShortInlineVar:
6373 case MonoShortInlineI:
6376 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction. */
6377 target = start + cli_addr + 2 + (signed char)ip [1];
6378 GET_BBLOCK (cfg, bblock, target);
6381 GET_BBLOCK (cfg, bblock, ip);
6383 case MonoInlineBrTarget:
/* 4-byte signed branch displacement. */
6384 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6385 GET_BBLOCK (cfg, bblock, target);
6388 GET_BBLOCK (cfg, bblock, ip);
6390 case MonoInlineSwitch: {
6391 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch instruction. */
6394 cli_addr += 5 + 4 * n;
6395 target = start + cli_addr;
6396 GET_BBLOCK (cfg, bblock, target);
6398 for (j = 0; j < n; ++j) {
6399 target = start + cli_addr + (gint32)read32 (ip);
6400 GET_BBLOCK (cfg, bblock, target);
6410 g_assert_not_reached ();
6413 if (i == CEE_THROW) {
6414 unsigned char *bb_start = ip - 1;
6416 /* Find the start of the bblock containing the throw */
6418 while ((bb_start >= start) && !bblock) {
6419 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot code path. */
6423 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 *   methods the token indexes the wrapper's own data and the result is then
 *   inflated with CONTEXT; otherwise a normal metadata lookup is done.
 *   "allow_open" = open constructed types in the result are tolerated
 *   (compare mini_get_method () below, which rejects them).
 */
static inline MonoMethod *
6434 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6438 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6439 method = mono_method_get_wrapper_data (m, token);
6442 method = mono_class_inflate_generic_method_checked (method, context, &error);
6443 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
6446 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open () but, outside generic-sharing
 *   compilation, rejects methods whose declaring class is still an open
 *   constructed type (the rejection statement falls in a line missing from
 *   this extraction — presumably the method is NULLed; TODO confirm).
 */
static inline MonoMethod *
6453 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6455 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6457 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data +
 *   generic inflation for wrappers, typespec lookup otherwise.  Errors from
 *   the checked lookup are currently discarded (see FIXME).  The resolved
 *   class is initialized before being returned.
 */
static inline MonoClass*
6464 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6469 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6470 klass = mono_method_get_wrapper_data (method, token);
6472 klass = mono_class_inflate_generic_class (klass, context);
6474 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6475 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6478 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: for wrappers, fetch the
 *   signature stored in the wrapper data and inflate it with CONTEXT;
 *   otherwise parse it from the method's image metadata.
 */
static inline MonoMethodSignature*
6483 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6485 MonoMethodSignature *fsig;
6487 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6490 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6492 fsig = mono_inflate_generic_signature (fsig, context, &error);
6494 g_assert (mono_error_ok (&error));
6497 fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * check_linkdemand:
 *
 *   CAS (Code Access Security) link-demand check for a call from CALLER to
 *   CALLEE.  When the callee carries declarative security and the check
 *   fails: for an ECMA link demand, code throwing a SecurityException is
 *   emitted before the call; otherwise the compile is flagged with
 *   MONO_EXCEPTION_SECURITY_LINKDEMAND (unless an earlier exception is
 *   already pending).  Per the header comment, returns TRUE when inlining
 *   must be aborted because security attributes are involved.
 */
6503 * Returns TRUE if the JIT should abort inlining because "callee"
6504 * is influenced by security attributes.
6507 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6511 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6515 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6516 if (result == MONO_JIT_SECURITY_OK)
6519 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6520 /* Generate code to throw a SecurityException before the actual call/link */
6521 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* '4' presumably encodes the security action/error kind expected by
 * LinkDemandSecurityException — TODO confirm against the managed side. */
6524 NEW_ICONST (cfg, args [0], 4);
6525 NEW_METHODCONST (cfg, args [1], caller);
6526 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6527 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6528 /* don't hide previous results */
6529 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6530 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a static) the managed
 *   SecurityManager.ThrowException(Exception) method, used by
 *   emit_throw_exception () below to raise precomputed exceptions.
 *   First-use initialization of the static is not guarded here.
 */
6538 throw_exception (void)
6540 static MonoMethod *method = NULL;
6543 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6544 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-created exception object EX at runtime, by
 *   calling SecurityManager.ThrowException with EX embedded as a pointer
 *   constant.
 */
6551 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6553 MonoMethod *thrower = throw_exception ();
6556 EMIT_NEW_PCONST (cfg, args [0], ex);
6557 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 *
 *   Map a wrapper back to the method it wraps, because custom (security)
 *   attributes are only reachable on the original method.  Non-wrappers and
 *   native-to-managed wrappers pass through unchanged (the return statements
 *   for those two cases are in lines missing from this extraction).
 */
6561 * Return the original method if a wrapper is specified. We can only access
6562 * the custom attributes from the original method.
6565 get_original_method (MonoMethod *method)
6567 if (method->wrapper_type == MONO_WRAPPER_NONE)
6570 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6571 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6574 /* in other cases we need to find the original method */
6575 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: check whether CALLER may access FIELD and, if not,
 *   emit code throwing the exception returned by the security layer.  The
 *   caller is unwrapped first because wrappers carry no security attributes.
 */
6579 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6580 MonoBasicBlock *bblock, unsigned char *ip)
6582 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6583 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6585 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: companion of the field check above, but for a call
 *   from CALLER to CALLEE.  Emits a throw of the returned exception when
 *   the call is disallowed.
 */
6589 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6590 MonoBasicBlock *bblock, unsigned char *ip)
6592 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6593 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6595 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 *   Recognize the compiler-generated "dup; ldtoken <field>; call
 *   RuntimeHelpers::InitializeArray" sequence that follows a newarr, and
 *   return a pointer to the static initialization data (or, for AOT, the
 *   RVA wrapped with GUINT_TO_POINTER so the lookup happens at load time).
 *   On success *out_size and *out_field_token are filled in.  Only primitive
 *   element types are accepted; multi-byte types are refused on big-endian
 *   targets since the blob is stored little-endian.
 *
 *   NOTE(review): partial extraction — the per-type size assignments, the
 *   'return NULL' failure paths and the final returns are in missing lines.
 */
6599 * Check that the IL instructions at ip are the array initialization
6600 * sequence and return the pointer to the data and the size.
6603 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6606 * newarr[System.Int32]
6608 * ldtoken field valuetype ...
6609 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Match the exact byte pattern: dup, ldtoken (0xd0) whose 4th operand byte
 * 0x4 marks a field token, then call. */
6611 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6613 guint32 token = read32 (ip + 7);
6614 guint32 field_token = read32 (ip + 2);
6615 guint32 field_index = field_token & 0xffffff;
6617 const char *data_ptr;
6619 MonoMethod *cmethod;
6620 MonoClass *dummy_class;
6621 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6625 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6629 *out_field_token = field_token;
6631 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Verify the callee really is corlib's RuntimeHelpers.InitializeArray. */
6634 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6636 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6637 case MONO_TYPE_BOOLEAN:
6641 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6642 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6643 case MONO_TYPE_CHAR:
/* Refuse when the blob is smaller than the declared field size. */
6660 if (size > mono_type_size (field->type, &dummy_align))
6663 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6664 if (!image_is_dynamic (method->klass->image)) {
6665 field_index = read32 (ip + 2) & 0xffffff;
6666 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6667 data_ptr = mono_image_rva_map (method->klass->image, rva);
6668 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6669 /* for aot code we do the lookup on load */
6670 if (aot && data_ptr)
6671 return GUINT_TO_POINTER (rva);
6673 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6675 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Mark CFG as failed with an InvalidProgramException, building a message
 * that names METHOD and disassembles the offending instruction at IP (or
 * notes an empty method body). The method header is queued on
 * cfg->headers_to_free so it is released when the CFG is destroyed.
 */
6683 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6685 char *method_fname = mono_method_full_name (method, TRUE);
6687 MonoMethodHeader *header = mono_method_get_header (method);
6689 if (header->code_size == 0)
6690 method_code = g_strdup ("method body is empty.");
/* disassemble the single instruction at IP for the error message */
6692 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6693 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6694 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6695 g_free (method_fname);
6696 g_free (method_code);
/* defer freeing the header: it may still be referenced until compilation ends */
6697 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Mark CFG as failed with a caller-supplied exception object. The
 * exception_ptr slot is registered as a GC root so the managed object
 * stays alive while the failed compile unwinds.
 */
6701 set_exception_object (MonoCompile *cfg, MonoException *exception)
6703 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6704 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6705 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the value *SP into local variable N. When the value
 * on top of the stack is the basic block's last instruction and is a
 * freshly emitted constant whose store would be a plain register move,
 * retarget the constant's destination register to the local instead of
 * emitting a separate move.
 */
6709 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6712 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6713 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6714 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6715 /* Optimize reg-reg moves away */
6717 * Can't optimize other opcodes, since sp[0] might point to
6718 * the last ins of a decomposed opcode.
/* write the constant straight into the local's register */
6720 sp [0]->dreg = (cfg)->locals [n]->dreg;
6722 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6727 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for 'ldloca': when the instruction following the ldloca is
 * one we can fold (currently 'initobj <type>' within the same basic
 * block), emit the equivalent code directly without materializing the
 * local's address. SIZE distinguishes the short/long ldloca encodings.
 */
6730 static inline unsigned char *
6731 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* long-form ldloca: 16-bit local index at ip + 2 */
6741 local = read16 (ip + 2);
/* ldloca followed by 'initobj <token>' in the same bblock */
6745 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6746 /* From the INITOBJ case */
6747 token = read32 (ip + 2);
6748 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6749 CHECK_TYPELOAD (klass);
6750 type = mini_replace_type (&klass->byval_arg);
/* initialize the local in place instead of taking its address */
6751 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Walk up the parent chain of CLASS and report whether it is (or
 * derives from) System.Exception.
 */
6759 is_exception_class (MonoClass *class)
6762 if (class == mono_defaults.exception_class)
6764 class = class->parent;
6770 * is_jit_optimizer_disabled:
6772 * Determine whether M's assembly has a DebuggableAttribute with the
6773 * IsJITOptimizerDisabled flag set.
6776 is_jit_optimizer_disabled (MonoMethod *m)
6778 MonoAssembly *ass = m->klass->image->assembly;
6779 MonoCustomAttrInfo* attrs;
/* cached System.Diagnostics.DebuggableAttribute class, looked up once */
6780 static MonoClass *klass;
6782 gboolean val = FALSE;
/* fast path: the answer is cached per-assembly */
6785 if (ass->jit_optimizer_disabled_inited)
6786 return ass->jit_optimizer_disabled;
6789 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute")/* */;
6792 ass->jit_optimizer_disabled = FALSE;
/* publish the cached value before setting the inited flag */
6793 mono_memory_barrier ();
6794 ass->jit_optimizer_disabled_inited = TRUE;
6798 attrs = mono_custom_attrs_from_assembly (ass);
6800 for (i = 0; i < attrs->num_attrs; ++i) {
6801 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6804 MonoMethodSignature *sig;
/* only DebuggableAttribute instances are interesting */
6806 if (!attr->ctor || attr->ctor->klass != klass)
6808 /* Decode the attribute. See reflection.c */
6809 len = attr->data_size;
6810 p = (const char*)attr->data;
/* custom attribute blobs start with the 0x0001 prolog */
6811 g_assert (read16 (p) == 0x0001);
6814 // FIXME: Support named parameters
6815 sig = mono_method_signature (attr->ctor);
/* only the (bool, bool) ctor overload is decoded here */
6816 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6818 /* Two boolean arguments */
6822 mono_custom_attrs_free (attrs);
6825 ass->jit_optimizer_disabled = val;
/* same publish-before-flag ordering as above */
6826 mono_memory_barrier ();
6827 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call to CMETHOD made with CALL_OPCODE can be
 * compiled as a real tail call. Starts from an arch-specific or
 * signature-equality check, then vetoes the transformation for cases
 * where arguments may point into the caller's stack frame, for
 * pinvokes, for methods needing an LMF, for most wrappers, and for
 * anything other than a plain CEE_CALL.
 */
6833 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6835 gboolean supported_tail_call;
6838 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
/* backend decides based on caller/callee signatures */
6839 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* fallback: signatures must match exactly and the return must not be a struct */
6841 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6844 for (i = 0; i < fsig->param_count; ++i) {
6845 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6846 /* These can point to the current method's stack */
6847 supported_tail_call = FALSE;
6849 if (fsig->hasthis && cmethod->klass->valuetype)
6850 /* this might point to the current method's stack */
6851 supported_tail_call = FALSE;
/* pinvokes need marshalling wrappers, so no tail call */
6852 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6853 supported_tail_call = FALSE;
/* the caller's LMF frame must stay live across the call */
6854 if (cfg->method->save_lmf)
6855 supported_tail_call = FALSE;
6856 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6857 supported_tail_call = FALSE;
/* only plain calls — no callvirt/calli — are tail-callable here */
6858 if (call_opcode != CEE_CALL)
6859 supported_tail_call = FALSE;
6861 /* Debugging support */
6863 if (supported_tail_call) {
/* allows bisecting tail-call bugs via the debug counter */
6864 if (!mono_debug_count ())
6865 supported_tail_call = FALSE;
6869 return supported_tail_call;
6872 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6873 * it to the thread local value based on the tls_offset field. Every other kind of access to
6874 * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return whether FIELD is corlib's ThreadLocal`1.tlsdata field, i.e.
 * the field whose ldflda accesses get the special TLS treatment above.
 */
6877 is_magic_tls_access (MonoClassField *field)
6879 if (strcmp (field->name, "tlsdata"))
6881 if (strcmp (field->parent->name, "ThreadLocal`1"))
/* only the corlib ThreadLocal`1 qualifies, not user types with the same name */
6883 return field->parent->image == mono_defaults.corlib;
6886 /* emits the code needed to access a managed tls var (like ThreadStatic)
6887 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6888 * pointer for the current thread.
6889 * Returns the MonoInst* representing the address of the tls var.
6892 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6895 int static_data_reg, array_reg, dreg;
6896 int offset2_reg, idx_reg;
6897 // inlined access to the tls data
6898 // idx = (offset >> 24) - 1;
6899 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6900 static_data_reg = alloc_ireg (cfg);
/* load thread->static_data (array of per-thread data chunks) */
6901 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
6902 idx_reg = alloc_ireg (cfg);
/* idx = (offset >> 24) - 1 */
6903 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6904 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* scale the index by the pointer size (shift by 3 on 64-bit, 2 on 32-bit) */
6905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6906 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6907 array_reg = alloc_ireg (cfg);
/* load static_data [idx] */
6908 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6909 offset2_reg = alloc_ireg (cfg);
/* low 24 bits of the offset index into the chunk */
6910 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6911 dreg = alloc_ireg (cfg);
/* final address: chunk base + intra-chunk offset */
6912 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6917 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6918 * this address is cached per-method in cached_tls_addr.
6921 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6923 MonoInst *load, *addr, *temp, *store, *thread_ins;
6924 MonoClassField *offset_field;
/* fast path: the address was already computed earlier in this method */
6926 if (*cached_tls_addr) {
6927 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6930 thread_ins = mono_get_thread_intrinsic (cfg);
/* the tls_offset field lives on the same ThreadLocal`1 instance */
6931 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6933 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6935 MONO_ADD_INS (cfg->cbb, thread_ins);
/* no thread intrinsic on this backend: fall back to a managed call */
6937 MonoMethod *thread_method;
6938 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6939 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6941 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6942 addr->klass = mono_class_from_mono_type (tls_field->type);
6943 addr->type = STACK_MP;
/* cache the computed address in a temp for later accesses in this method */
6944 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6945 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6947 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6954 * Handle calls made to ctors from NEWOBJ opcodes.
6956 * REF_BBLOCK will point to the current bblock after the call.
6959 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
6960 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
6962 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
6963 MonoBasicBlock *bblock = *ref_bblock;
/* generic-shared valuetype ctors need an extra rgctx/vtable argument */
6965 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
6966 mono_method_is_generic_sharable (cmethod, TRUE)) {
6967 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
6968 mono_class_vtable (cfg->domain, cmethod->klass);
6969 CHECK_TYPELOAD (cmethod->klass);
6971 vtable_arg = emit_get_rgctx_method (cfg, context_used,
6972 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6975 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
6976 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* non-shared case: the vtable can be embedded as a constant */
6978 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6980 CHECK_TYPELOAD (cmethod->klass);
6981 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6986 /* Avoid virtual calls to ctors if possible */
6987 if (mono_class_is_marshalbyref (cmethod->klass))
6988 callvirt_this_arg = sp [0];
/* first try an intrinsic expansion of the ctor */
6990 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
6991 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
6992 CHECK_CFG_EXCEPTION;
/* then try inlining it (exception-class ctors are excluded) */
6993 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
6994 mono_method_check_inlining (cfg, cmethod) &&
6995 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
6998 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
6999 cfg->real_offset += 5;
/* account for the inlined body minus the call we avoided */
7001 *inline_costs += costs - 5;
7002 *ref_bblock = bblock;
7004 INLINE_FAILURE ("inline failure");
7005 // FIXME-VT: Clean this up
7006 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7007 GSHAREDVT_FAILURE(*ip);
7008 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt: call through the out trampoline fetched from the rgctx */
7010 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7013 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7014 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7015 } else if (context_used &&
7016 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7017 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7018 MonoInst *cmethod_addr;
7020 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7022 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7023 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7025 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* default: a plain direct call to the ctor */
7027 INLINE_FAILURE ("ctor call");
7028 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7029 callvirt_this_arg, NULL, vtable_arg);
7036 * mono_method_to_ir:
7038 * Translate the .net IL into linear IR.
7041 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7042 MonoInst *return_var, MonoInst **inline_args,
7043 guint inline_offset, gboolean is_virtual_call)
7046 MonoInst *ins, **sp, **stack_start;
7047 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7048 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7049 MonoMethod *cmethod, *method_definition;
7050 MonoInst **arg_array;
7051 MonoMethodHeader *header;
7053 guint32 token, ins_flag;
7055 MonoClass *constrained_call = NULL;
7056 unsigned char *ip, *end, *target, *err_pos;
7057 MonoMethodSignature *sig;
7058 MonoGenericContext *generic_context = NULL;
7059 MonoGenericContainer *generic_container = NULL;
7060 MonoType **param_types;
7061 int i, n, start_new_bblock, dreg;
7062 int num_calls = 0, inline_costs = 0;
7063 int breakpoint_id = 0;
7065 MonoBoolean security, pinvoke;
7066 MonoSecurityManager* secman = NULL;
7067 MonoDeclSecurityActions actions;
7068 GSList *class_inits = NULL;
7069 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7071 gboolean init_locals, seq_points, skip_dead_blocks;
7072 gboolean sym_seq_points = FALSE;
7073 MonoInst *cached_tls_addr = NULL;
7074 MonoDebugMethodInfo *minfo;
7075 MonoBitSet *seq_point_locs = NULL;
7076 MonoBitSet *seq_point_set_locs = NULL;
7078 cfg->disable_inline = is_jit_optimizer_disabled (method);
7080 /* serialization and xdomain stuff may need access to private fields and methods */
7081 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7082 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7083 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7084 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7085 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7086 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7088 dont_verify |= mono_security_smcs_hack_enabled ();
7090 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7091 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7092 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7093 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7094 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7096 image = method->klass->image;
7097 header = mono_method_get_header (method);
7099 MonoLoaderError *error;
7101 if ((error = mono_loader_get_last_error ())) {
7102 mono_cfg_set_exception (cfg, error->exception_type);
7104 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7105 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7107 goto exception_exit;
7109 generic_container = mono_method_get_generic_container (method);
7110 sig = mono_method_signature (method);
7111 num_args = sig->hasthis + sig->param_count;
7112 ip = (unsigned char*)header->code;
7113 cfg->cil_start = ip;
7114 end = ip + header->code_size;
7115 cfg->stat_cil_code_size += header->code_size;
7117 seq_points = cfg->gen_seq_points && cfg->method == method;
7118 #ifdef PLATFORM_ANDROID
7119 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7122 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7123 /* We could hit a seq point before attaching to the JIT (#8338) */
7127 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7128 minfo = mono_debug_lookup_method (method);
7130 int i, n_il_offsets;
7134 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7135 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7136 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7137 sym_seq_points = TRUE;
7138 for (i = 0; i < n_il_offsets; ++i) {
7139 if (il_offsets [i] < header->code_size)
7140 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7142 g_free (il_offsets);
7143 g_free (line_numbers);
7144 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7145 /* Methods without line number info like auto-generated property accessors */
7146 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7147 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7148 sym_seq_points = TRUE;
7153 * Methods without init_locals set could cause asserts in various passes
7154 * (#497220). To work around this, we emit dummy initialization opcodes
7155 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7156 * on some platforms.
7158 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7159 init_locals = header->init_locals;
7163 method_definition = method;
7164 while (method_definition->is_inflated) {
7165 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7166 method_definition = imethod->declaring;
7169 /* SkipVerification is not allowed if core-clr is enabled */
7170 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7172 dont_verify_stloc = TRUE;
7175 if (sig->is_inflated)
7176 generic_context = mono_method_get_context (method);
7177 else if (generic_container)
7178 generic_context = &generic_container->context;
7179 cfg->generic_context = generic_context;
7181 if (!cfg->generic_sharing_context)
7182 g_assert (!sig->has_type_parameters);
7184 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7185 g_assert (method->is_inflated);
7186 g_assert (mono_method_get_context (method)->method_inst);
7188 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7189 g_assert (sig->generic_param_count);
7191 if (cfg->method == method) {
7192 cfg->real_offset = 0;
7194 cfg->real_offset = inline_offset;
7197 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7198 cfg->cil_offset_to_bb_len = header->code_size;
7200 cfg->current_method = method;
7202 if (cfg->verbose_level > 2)
7203 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7205 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7207 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7208 for (n = 0; n < sig->param_count; ++n)
7209 param_types [n + sig->hasthis] = sig->params [n];
7210 cfg->arg_types = param_types;
7212 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7213 if (cfg->method == method) {
7215 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7216 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7219 NEW_BBLOCK (cfg, start_bblock);
7220 cfg->bb_entry = start_bblock;
7221 start_bblock->cil_code = NULL;
7222 start_bblock->cil_length = 0;
7223 #if defined(__native_client_codegen__)
7224 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7225 ins->dreg = alloc_dreg (cfg, STACK_I4);
7226 MONO_ADD_INS (start_bblock, ins);
7230 NEW_BBLOCK (cfg, end_bblock);
7231 cfg->bb_exit = end_bblock;
7232 end_bblock->cil_code = NULL;
7233 end_bblock->cil_length = 0;
7234 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7235 g_assert (cfg->num_bblocks == 2);
7237 arg_array = cfg->args;
7239 if (header->num_clauses) {
7240 cfg->spvars = g_hash_table_new (NULL, NULL);
7241 cfg->exvars = g_hash_table_new (NULL, NULL);
7243 /* handle exception clauses */
7244 for (i = 0; i < header->num_clauses; ++i) {
7245 MonoBasicBlock *try_bb;
7246 MonoExceptionClause *clause = &header->clauses [i];
7247 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7248 try_bb->real_offset = clause->try_offset;
7249 try_bb->try_start = TRUE;
7250 try_bb->region = ((i + 1) << 8) | clause->flags;
7251 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7252 tblock->real_offset = clause->handler_offset;
7253 tblock->flags |= BB_EXCEPTION_HANDLER;
7256 * Linking the try block with the EH block hinders inlining as we won't be able to
7257 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7259 if (COMPILE_LLVM (cfg))
7260 link_bblock (cfg, try_bb, tblock);
7262 if (*(ip + clause->handler_offset) == CEE_POP)
7263 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7265 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7266 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7267 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7268 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7269 MONO_ADD_INS (tblock, ins);
7271 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7272 /* finally clauses already have a seq point */
7273 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7274 MONO_ADD_INS (tblock, ins);
7277 /* todo: is a fault block unsafe to optimize? */
7278 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7279 tblock->flags |= BB_EXCEPTION_UNSAFE;
7283 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7285 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7287 /* catch and filter blocks get the exception object on the stack */
7288 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7289 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7290 MonoInst *dummy_use;
7292 /* mostly like handle_stack_args (), but just sets the input args */
7293 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7294 tblock->in_scount = 1;
7295 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7296 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7299 * Add a dummy use for the exvar so its liveness info will be
7303 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7305 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7306 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7307 tblock->flags |= BB_EXCEPTION_HANDLER;
7308 tblock->real_offset = clause->data.filter_offset;
7309 tblock->in_scount = 1;
7310 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7311 /* The filter block shares the exvar with the handler block */
7312 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7313 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7314 MONO_ADD_INS (tblock, ins);
7318 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7319 clause->data.catch_class &&
7320 cfg->generic_sharing_context &&
7321 mono_class_check_context_used (clause->data.catch_class)) {
7323 * In shared generic code with catch
7324 * clauses containing type variables
7325 * the exception handling code has to
7326 * be able to get to the rgctx.
7327 * Therefore we have to make sure that
7328 * the vtable/mrgctx argument (for
7329 * static or generic methods) or the
7330 * "this" argument (for non-static
7331 * methods) are live.
7333 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7334 mini_method_get_context (method)->method_inst ||
7335 method->klass->valuetype) {
7336 mono_get_vtable_var (cfg);
7338 MonoInst *dummy_use;
7340 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7345 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7346 cfg->cbb = start_bblock;
7347 cfg->args = arg_array;
7348 mono_save_args (cfg, sig, inline_args);
7351 /* FIRST CODE BLOCK */
7352 NEW_BBLOCK (cfg, bblock);
7353 bblock->cil_code = ip;
7357 ADD_BBLOCK (cfg, bblock);
7359 if (cfg->method == method) {
7360 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7361 if (breakpoint_id) {
7362 MONO_INST_NEW (cfg, ins, OP_BREAK);
7363 MONO_ADD_INS (bblock, ins);
7367 if (mono_security_cas_enabled ())
7368 secman = mono_security_manager_get_methods ();
7370 security = (secman && mono_security_method_has_declsec (method));
7371 /* at this point having security doesn't mean we have any code to generate */
7372 if (security && (cfg->method == method)) {
7373 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7374 * And we do not want to enter the next section (with allocation) if we
7375 * have nothing to generate */
7376 security = mono_declsec_get_demands (method, &actions);
7379 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7380 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7382 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7383 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7384 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7386 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
7387 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7391 mono_custom_attrs_free (custom);
7394 custom = mono_custom_attrs_from_class (wrapped->klass);
7395 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7399 mono_custom_attrs_free (custom);
7402 /* not a P/Invoke after all */
7407 /* we use a separate basic block for the initialization code */
7408 NEW_BBLOCK (cfg, init_localsbb);
7409 cfg->bb_init = init_localsbb;
7410 init_localsbb->real_offset = cfg->real_offset;
7411 start_bblock->next_bb = init_localsbb;
7412 init_localsbb->next_bb = bblock;
7413 link_bblock (cfg, start_bblock, init_localsbb);
7414 link_bblock (cfg, init_localsbb, bblock);
7416 cfg->cbb = init_localsbb;
7418 if (cfg->gsharedvt && cfg->method == method) {
7419 MonoGSharedVtMethodInfo *info;
7420 MonoInst *var, *locals_var;
7423 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7424 info->method = cfg->method;
7425 info->count_entries = 16;
7426 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7427 cfg->gsharedvt_info = info;
7429 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7430 /* prevent it from being register allocated */
7431 //var->flags |= MONO_INST_VOLATILE;
7432 cfg->gsharedvt_info_var = var;
7434 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7435 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7437 /* Allocate locals */
7438 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7439 /* prevent it from being register allocated */
7440 //locals_var->flags |= MONO_INST_VOLATILE;
7441 cfg->gsharedvt_locals_var = locals_var;
7443 dreg = alloc_ireg (cfg);
7444 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7446 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7447 ins->dreg = locals_var->dreg;
7449 MONO_ADD_INS (cfg->cbb, ins);
7450 cfg->gsharedvt_locals_var_ins = ins;
7452 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7455 ins->flags |= MONO_INST_INIT;
7459 /* at this point we know, if security is TRUE, that some code needs to be generated */
7460 if (security && (cfg->method == method)) {
7463 cfg->stat_cas_demand_generation++;
7465 if (actions.demand.blob) {
7466 /* Add code for SecurityAction.Demand */
7467 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7468 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7469 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7470 mono_emit_method_call (cfg, secman->demand, args, NULL);
7472 if (actions.noncasdemand.blob) {
7473 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7474 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7475 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7476 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7477 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7478 mono_emit_method_call (cfg, secman->demand, args, NULL);
7480 if (actions.demandchoice.blob) {
7481 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7482 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7483 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7484 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7485 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7489 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7491 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7494 if (mono_security_core_clr_enabled ()) {
7495 /* check if this is native code, e.g. an icall or a p/invoke */
7496 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7497 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7499 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7500 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7502 /* if this is a native call then it can only be JITted from platform code */
7503 if ((icall || pinvk) && method->klass && method->klass->image) {
7504 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7505 MonoException *ex = icall ? mono_get_exception_security () :
7506 mono_get_exception_method_access ();
7507 emit_throw_exception (cfg, ex);
7514 CHECK_CFG_EXCEPTION;
7516 if (header->code_size == 0)
7519 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7524 if (cfg->method == method)
7525 mono_debug_init_method (cfg, bblock, breakpoint_id);
7527 for (n = 0; n < header->num_locals; ++n) {
7528 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7533 /* We force the vtable variable here for all shared methods
7534 for the possibility that they might show up in a stack
7535 trace where their exact instantiation is needed. */
7536 if (cfg->generic_sharing_context && method == cfg->method) {
7537 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7538 mini_method_get_context (method)->method_inst ||
7539 method->klass->valuetype) {
7540 mono_get_vtable_var (cfg);
7542 /* FIXME: Is there a better way to do this?
7543 We need the variable live for the duration
7544 of the whole method. */
7545 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7549 /* add a check for this != NULL to inlined methods */
7550 if (is_virtual_call) {
7553 NEW_ARGLOAD (cfg, arg_ins, 0);
7554 MONO_ADD_INS (cfg->cbb, arg_ins);
7555 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7558 skip_dead_blocks = !dont_verify;
7559 if (skip_dead_blocks) {
7560 original_bb = bb = mono_basic_block_split (method, &cfg->error);
7565 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7566 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7569 start_new_bblock = 0;
7572 if (cfg->method == method)
7573 cfg->real_offset = ip - header->code;
7575 cfg->real_offset = inline_offset;
7580 if (start_new_bblock) {
7581 bblock->cil_length = ip - bblock->cil_code;
7582 if (start_new_bblock == 2) {
7583 g_assert (ip == tblock->cil_code);
7585 GET_BBLOCK (cfg, tblock, ip);
7587 bblock->next_bb = tblock;
7590 start_new_bblock = 0;
7591 for (i = 0; i < bblock->in_scount; ++i) {
7592 if (cfg->verbose_level > 3)
7593 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7594 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7598 g_slist_free (class_inits);
7601 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7602 link_bblock (cfg, bblock, tblock);
7603 if (sp != stack_start) {
7604 handle_stack_args (cfg, stack_start, sp - stack_start);
7606 CHECK_UNVERIFIABLE (cfg);
7608 bblock->next_bb = tblock;
7611 for (i = 0; i < bblock->in_scount; ++i) {
7612 if (cfg->verbose_level > 3)
7613 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7614 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7617 g_slist_free (class_inits);
7622 if (skip_dead_blocks) {
7623 int ip_offset = ip - header->code;
7625 if (ip_offset == bb->end)
7629 int op_size = mono_opcode_size (ip, end);
7630 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7632 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7634 if (ip_offset + op_size == bb->end) {
7635 MONO_INST_NEW (cfg, ins, OP_NOP);
7636 MONO_ADD_INS (bblock, ins);
7637 start_new_bblock = 1;
7645 * Sequence points are points where the debugger can place a breakpoint.
7646 * Currently, we generate these automatically at points where the IL
7649 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7651	 * Make methods interruptible at the beginning, and at the targets of
7652 * backward branches.
7653 * Also, do this at the start of every bblock in methods with clauses too,
7654	 * to be able to handle instructions with imprecise control flow like
7656 * Backward branches are handled at the end of method-to-ir ().
7658 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7660 /* Avoid sequence points on empty IL like .volatile */
7661 // FIXME: Enable this
7662 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7663 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7664 if (sp != stack_start)
7665 ins->flags |= MONO_INST_NONEMPTY_STACK;
7666 MONO_ADD_INS (cfg->cbb, ins);
7669 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7672 bblock->real_offset = cfg->real_offset;
7674 if ((cfg->method == method) && cfg->coverage_info) {
7675 guint32 cil_offset = ip - header->code;
7676 cfg->coverage_info->data [cil_offset].cil_code = ip;
7678 /* TODO: Use an increment here */
7679 #if defined(TARGET_X86)
7680 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7681 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7683 MONO_ADD_INS (cfg->cbb, ins);
7685 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7686 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7690 if (cfg->verbose_level > 3)
7691 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7695 if (seq_points && !sym_seq_points && sp != stack_start) {
7697 * The C# compiler uses these nops to notify the JIT that it should
7698 * insert seq points.
7700 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7701 MONO_ADD_INS (cfg->cbb, ins);
7703 if (cfg->keep_cil_nops)
7704 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7706 MONO_INST_NEW (cfg, ins, OP_NOP);
7708 MONO_ADD_INS (bblock, ins);
7711 if (should_insert_brekpoint (cfg->method)) {
7712 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7714 MONO_INST_NEW (cfg, ins, OP_NOP);
7717 MONO_ADD_INS (bblock, ins);
7723 CHECK_STACK_OVF (1);
7724 n = (*ip)-CEE_LDARG_0;
7726 EMIT_NEW_ARGLOAD (cfg, ins, n);
7734 CHECK_STACK_OVF (1);
7735 n = (*ip)-CEE_LDLOC_0;
7737 EMIT_NEW_LOCLOAD (cfg, ins, n);
7746 n = (*ip)-CEE_STLOC_0;
7749 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7751 emit_stloc_ir (cfg, sp, header, n);
7758 CHECK_STACK_OVF (1);
7761 EMIT_NEW_ARGLOAD (cfg, ins, n);
7767 CHECK_STACK_OVF (1);
7770 NEW_ARGLOADA (cfg, ins, n);
7771 MONO_ADD_INS (cfg->cbb, ins);
7781 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7783 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7788 CHECK_STACK_OVF (1);
7791 EMIT_NEW_LOCLOAD (cfg, ins, n);
7795 case CEE_LDLOCA_S: {
7796 unsigned char *tmp_ip;
7798 CHECK_STACK_OVF (1);
7799 CHECK_LOCAL (ip [1]);
7801 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7807 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7816 CHECK_LOCAL (ip [1]);
7817 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7819 emit_stloc_ir (cfg, sp, header, ip [1]);
7824 CHECK_STACK_OVF (1);
7825 EMIT_NEW_PCONST (cfg, ins, NULL);
7826 ins->type = STACK_OBJ;
7831 CHECK_STACK_OVF (1);
7832 EMIT_NEW_ICONST (cfg, ins, -1);
7845 CHECK_STACK_OVF (1);
7846 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7852 CHECK_STACK_OVF (1);
7854 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7860 CHECK_STACK_OVF (1);
7861 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7867 CHECK_STACK_OVF (1);
7868 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7869 ins->type = STACK_I8;
7870 ins->dreg = alloc_dreg (cfg, STACK_I8);
7872 ins->inst_l = (gint64)read64 (ip);
7873 MONO_ADD_INS (bblock, ins);
7879 gboolean use_aotconst = FALSE;
7881 #ifdef TARGET_POWERPC
7882 /* FIXME: Clean this up */
7883 if (cfg->compile_aot)
7884 use_aotconst = TRUE;
7887 /* FIXME: we should really allocate this only late in the compilation process */
7888 f = mono_domain_alloc (cfg->domain, sizeof (float));
7890 CHECK_STACK_OVF (1);
7896 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7898 dreg = alloc_freg (cfg);
7899 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7900 ins->type = STACK_R8;
7902 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7903 ins->type = STACK_R8;
7904 ins->dreg = alloc_dreg (cfg, STACK_R8);
7906 MONO_ADD_INS (bblock, ins);
7916 gboolean use_aotconst = FALSE;
7918 #ifdef TARGET_POWERPC
7919 /* FIXME: Clean this up */
7920 if (cfg->compile_aot)
7921 use_aotconst = TRUE;
7924 /* FIXME: we should really allocate this only late in the compilation process */
7925 d = mono_domain_alloc (cfg->domain, sizeof (double));
7927 CHECK_STACK_OVF (1);
7933 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7935 dreg = alloc_freg (cfg);
7936 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7937 ins->type = STACK_R8;
7939 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7940 ins->type = STACK_R8;
7941 ins->dreg = alloc_dreg (cfg, STACK_R8);
7943 MONO_ADD_INS (bblock, ins);
7952 MonoInst *temp, *store;
7954 CHECK_STACK_OVF (1);
7958 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7959 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7961 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7964 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7977 if (sp [0]->type == STACK_R8)
7978 /* we need to pop the value from the x86 FP stack */
7979 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7985 INLINE_FAILURE ("jmp");
7986 GSHAREDVT_FAILURE (*ip);
7989 if (stack_start != sp)
7991 token = read32 (ip + 1);
7992 /* FIXME: check the signature matches */
7993 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7995 if (!cmethod || mono_loader_get_last_error ())
7998 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7999 GENERIC_SHARING_FAILURE (CEE_JMP);
8001 if (mono_security_cas_enabled ())
8002 CHECK_CFG_EXCEPTION;
8004 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8006 if (ARCH_HAVE_OP_TAIL_CALL) {
8007 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8010 /* Handle tail calls similarly to calls */
8011 n = fsig->param_count + fsig->hasthis;
8015 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8016 call->method = cmethod;
8017 call->tail_call = TRUE;
8018 call->signature = mono_method_signature (cmethod);
8019 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8020 call->inst.inst_p0 = cmethod;
8021 for (i = 0; i < n; ++i)
8022 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8024 mono_arch_emit_call (cfg, call);
8025 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8026 MONO_ADD_INS (bblock, (MonoInst*)call);
8028 for (i = 0; i < num_args; ++i)
8029 /* Prevent arguments from being optimized away */
8030 arg_array [i]->flags |= MONO_INST_VOLATILE;
8032 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8033 ins = (MonoInst*)call;
8034 ins->inst_p0 = cmethod;
8035 MONO_ADD_INS (bblock, ins);
8039 start_new_bblock = 1;
8044 case CEE_CALLVIRT: {
8045 MonoInst *addr = NULL;
8046 MonoMethodSignature *fsig = NULL;
8048 int virtual = *ip == CEE_CALLVIRT;
8049 int calli = *ip == CEE_CALLI;
8050 gboolean pass_imt_from_rgctx = FALSE;
8051 MonoInst *imt_arg = NULL;
8052 MonoInst *keep_this_alive = NULL;
8053 gboolean pass_vtable = FALSE;
8054 gboolean pass_mrgctx = FALSE;
8055 MonoInst *vtable_arg = NULL;
8056 gboolean check_this = FALSE;
8057 gboolean supported_tail_call = FALSE;
8058 gboolean tail_call = FALSE;
8059 gboolean need_seq_point = FALSE;
8060 guint32 call_opcode = *ip;
8061 gboolean emit_widen = TRUE;
8062 gboolean push_res = TRUE;
8063 gboolean skip_ret = FALSE;
8064 gboolean delegate_invoke = FALSE;
8067 token = read32 (ip + 1);
8072 //GSHAREDVT_FAILURE (*ip);
8077 fsig = mini_get_signature (method, token, generic_context);
8078 n = fsig->param_count + fsig->hasthis;
8080 if (method->dynamic && fsig->pinvoke) {
8084 * This is a call through a function pointer using a pinvoke
8085 * signature. Have to create a wrapper and call that instead.
8086 * FIXME: This is very slow, need to create a wrapper at JIT time
8087 * instead based on the signature.
8089 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8090 EMIT_NEW_PCONST (cfg, args [1], fsig);
8092 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8095 MonoMethod *cil_method;
8097 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8098 cil_method = cmethod;
8100 if (constrained_call) {
8101 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8102 if (cfg->verbose_level > 2)
8103 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8104 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8105 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8106 cfg->generic_sharing_context)) {
8107 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context, &cfg->error);
8111 if (cfg->verbose_level > 2)
8112 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8114 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8116 * This is needed since get_method_constrained can't find
8117 * the method in klass representing a type var.
8118 * The type var is guaranteed to be a reference type in this
8121 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8122 g_assert (!cmethod->klass->valuetype);
8124 cmethod = mono_get_method_constrained_checked (image, token, constrained_call, generic_context, &cil_method, &cfg->error);
8130 if (!cmethod || mono_loader_get_last_error ())
8132 if (!dont_verify && !cfg->skip_visibility) {
8133 MonoMethod *target_method = cil_method;
8134 if (method->is_inflated) {
8135 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8137 if (!mono_method_can_access_method (method_definition, target_method) &&
8138 !mono_method_can_access_method (method, cil_method))
8139 METHOD_ACCESS_FAILURE (method, cil_method);
8142 if (mono_security_core_clr_enabled ())
8143 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8145 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8146 /* MS.NET seems to silently convert this to a callvirt */
8151 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8152 * converts to a callvirt.
8154 * tests/bug-515884.il is an example of this behavior
8156 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8157 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8158 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8162 if (!cmethod->klass->inited)
8163 if (!mono_class_init (cmethod->klass))
8164 TYPE_LOAD_ERROR (cmethod->klass);
8166 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8167 mini_class_is_system_array (cmethod->klass)) {
8168 array_rank = cmethod->klass->rank;
8169 fsig = mono_method_signature (cmethod);
8171 fsig = mono_method_signature (cmethod);
8176 if (fsig->pinvoke) {
8177 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8178 check_for_pending_exc, cfg->compile_aot);
8179 fsig = mono_method_signature (wrapper);
8180 } else if (constrained_call) {
8181 fsig = mono_method_signature (cmethod);
8183 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8188 mono_save_token_info (cfg, image, token, cil_method);
8190 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8191 need_seq_point = TRUE;
8193 n = fsig->param_count + fsig->hasthis;
8195 /* Don't support calls made using type arguments for now */
8197 if (cfg->gsharedvt) {
8198 if (mini_is_gsharedvt_signature (cfg, fsig))
8199 GSHAREDVT_FAILURE (*ip);
8203 if (mono_security_cas_enabled ()) {
8204 if (check_linkdemand (cfg, method, cmethod))
8205 INLINE_FAILURE ("linkdemand");
8206 CHECK_CFG_EXCEPTION;
8209 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8210 g_assert_not_reached ();
8213 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8216 if (!cfg->generic_sharing_context && cmethod)
8217 g_assert (!mono_method_check_context_used (cmethod));
8221 //g_assert (!virtual || fsig->hasthis);
8225 if (constrained_call) {
8226 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8228	 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
8230 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8231 /* The 'Own method' case below */
8232 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8233 /* 'The type parameter is instantiated as a reference type' case below. */
8234 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8235 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8236 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8237 MonoInst *args [16];
8240 * This case handles calls to
8241 * - object:ToString()/Equals()/GetHashCode(),
8242 * - System.IComparable<T>:CompareTo()
8243 * - System.IEquatable<T>:Equals ()
8244 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8248 if (mono_method_check_context_used (cmethod))
8249 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8251 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8252 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8254 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8255 if (fsig->hasthis && fsig->param_count) {
8256 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8257 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8258 ins->dreg = alloc_preg (cfg);
8259 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8260 MONO_ADD_INS (cfg->cbb, ins);
8263 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8266 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8268 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8269 addr_reg = ins->dreg;
8270 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8272 EMIT_NEW_ICONST (cfg, args [3], 0);
8273 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8276 EMIT_NEW_ICONST (cfg, args [3], 0);
8277 EMIT_NEW_ICONST (cfg, args [4], 0);
8279 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8282 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8283 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8284 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
8288 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8289 MONO_ADD_INS (cfg->cbb, add);
8291 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8292 MONO_ADD_INS (cfg->cbb, ins);
8293 /* ins represents the call result */
8298 GSHAREDVT_FAILURE (*ip);
8302 * We have the `constrained.' prefix opcode.
8304 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8306 * The type parameter is instantiated as a valuetype,
8307 * but that type doesn't override the method we're
8308 * calling, so we need to box `this'.
8310 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8311 ins->klass = constrained_call;
8312 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8313 CHECK_CFG_EXCEPTION;
8314 } else if (!constrained_call->valuetype) {
8315 int dreg = alloc_ireg_ref (cfg);
8318 * The type parameter is instantiated as a reference
8319 * type. We have a managed pointer on the stack, so
8320 * we need to dereference it here.
8322 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8323 ins->type = STACK_OBJ;
8326 if (cmethod->klass->valuetype) {
8329 /* Interface method */
8332 mono_class_setup_vtable (constrained_call);
8333 CHECK_TYPELOAD (constrained_call);
8334 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8336 TYPE_LOAD_ERROR (constrained_call);
8337 slot = mono_method_get_vtable_slot (cmethod);
8339 TYPE_LOAD_ERROR (cmethod->klass);
8340 cmethod = constrained_call->vtable [ioffset + slot];
8342 if (cmethod->klass == mono_defaults.enum_class) {
8343 /* Enum implements some interfaces, so treat this as the first case */
8344 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8345 ins->klass = constrained_call;
8346 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8347 CHECK_CFG_EXCEPTION;
8352 constrained_call = NULL;
8355 if (!calli && check_call_signature (cfg, fsig, sp))
8358 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8359 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8360 delegate_invoke = TRUE;
8363 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8365 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8366 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8374 * If the callee is a shared method, then its static cctor
8375 * might not get called after the call was patched.
8377 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8378 emit_generic_class_init (cfg, cmethod->klass);
8379 CHECK_TYPELOAD (cmethod->klass);
8383 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8385 if (cfg->generic_sharing_context && cmethod) {
8386 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8388 context_used = mini_method_check_context_used (cfg, cmethod);
8390 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8391 /* Generic method interface
8392 calls are resolved via a
8393 helper function and don't
8395 if (!cmethod_context || !cmethod_context->method_inst)
8396 pass_imt_from_rgctx = TRUE;
8400 * If a shared method calls another
8401 * shared method then the caller must
8402 * have a generic sharing context
8403 * because the magic trampoline
8404 * requires it. FIXME: We shouldn't
8405 * have to force the vtable/mrgctx
8406 * variable here. Instead there
8407 * should be a flag in the cfg to
8408 * request a generic sharing context.
8411 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8412 mono_get_vtable_var (cfg);
8417 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8419 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8421 CHECK_TYPELOAD (cmethod->klass);
8422 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8427 g_assert (!vtable_arg);
8429 if (!cfg->compile_aot) {
8431 * emit_get_rgctx_method () calls mono_class_vtable () so check
8432 * for type load errors before.
8434 mono_class_setup_vtable (cmethod->klass);
8435 CHECK_TYPELOAD (cmethod->klass);
8438 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8440 /* !marshalbyref is needed to properly handle generic methods + remoting */
8441 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8442 MONO_METHOD_IS_FINAL (cmethod)) &&
8443 !mono_class_is_marshalbyref (cmethod->klass)) {
8450 if (pass_imt_from_rgctx) {
8451 g_assert (!pass_vtable);
8454 imt_arg = emit_get_rgctx_method (cfg, context_used,
8455 cmethod, MONO_RGCTX_INFO_METHOD);
8459 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8461 /* Calling virtual generic methods */
8462 if (cmethod && virtual &&
8463 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8464 !(MONO_METHOD_IS_FINAL (cmethod) &&
8465 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8466 fsig->generic_param_count &&
8467 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8468 MonoInst *this_temp, *this_arg_temp, *store;
8469 MonoInst *iargs [4];
8470 gboolean use_imt = FALSE;
8472 g_assert (fsig->is_inflated);
8474 /* Prevent inlining of methods that contain indirect calls */
8475 INLINE_FAILURE ("virtual generic call");
8477 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8478 GSHAREDVT_FAILURE (*ip);
8480 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8481 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8486 g_assert (!imt_arg);
8488 g_assert (cmethod->is_inflated);
8489 imt_arg = emit_get_rgctx_method (cfg, context_used,
8490 cmethod, MONO_RGCTX_INFO_METHOD);
8491 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8493 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8494 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8495 MONO_ADD_INS (bblock, store);
8497 /* FIXME: This should be a managed pointer */
8498 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8500 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8501 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8502 cmethod, MONO_RGCTX_INFO_METHOD);
8503 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8504 addr = mono_emit_jit_icall (cfg,
8505 mono_helper_compile_generic_method, iargs);
8507 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8509 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8516 * Implement a workaround for the inherent races involved in locking:
8522 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8523 * try block, the Exit () won't be executed, see:
8524 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8525 * To work around this, we extend such try blocks to include the last x bytes
8526 * of the Monitor.Enter () call.
8528 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8529 MonoBasicBlock *tbb;
8531 GET_BBLOCK (cfg, tbb, ip + 5);
8533 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8534 * from Monitor.Enter like ArgumentNullException.
8536 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8537 /* Mark this bblock as needing to be extended */
8538 tbb->extend_try_block = TRUE;
8542 /* Conversion to a JIT intrinsic */
8543 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8545 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8546 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8553 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8554 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8555 mono_method_check_inlining (cfg, cmethod)) {
8557 gboolean always = FALSE;
8559 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8560 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8561 /* Prevent inlining of methods that call wrappers */
8562 INLINE_FAILURE ("wrapper call");
8563 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8567 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
8569 cfg->real_offset += 5;
8571 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8572 /* *sp is already set by inline_method */
8577 inline_costs += costs;
8583 /* Tail recursion elimination */
8584 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8585 gboolean has_vtargs = FALSE;
8588 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8589 INLINE_FAILURE ("tail call");
8591 /* keep it simple */
8592 for (i = fsig->param_count - 1; i >= 0; i--) {
8593 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8598 for (i = 0; i < n; ++i)
8599 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8600 MONO_INST_NEW (cfg, ins, OP_BR);
8601 MONO_ADD_INS (bblock, ins);
8602 tblock = start_bblock->out_bb [0];
8603 link_bblock (cfg, bblock, tblock);
8604 ins->inst_target_bb = tblock;
8605 start_new_bblock = 1;
8607 /* skip the CEE_RET, too */
8608 if (ip_in_bb (cfg, bblock, ip + 5))
8615 inline_costs += 10 * num_calls++;
8618 * Making generic calls out of gsharedvt methods.
8619 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8620 * patching gshared method addresses into a gsharedvt method.
8622 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class)) {
8623 MonoRgctxInfoType info_type;
8626 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8627 //GSHAREDVT_FAILURE (*ip);
8628 // disable for possible remoting calls
8629 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8630 GSHAREDVT_FAILURE (*ip);
8631 if (fsig->generic_param_count) {
8632 /* virtual generic call */
8633 g_assert (mono_use_imt);
8634 g_assert (!imt_arg);
8635 /* Same as the virtual generic case above */
8636 imt_arg = emit_get_rgctx_method (cfg, context_used,
8637 cmethod, MONO_RGCTX_INFO_METHOD);
8638 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8640 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
8641 /* This can happen when we call a fully instantiated iface method */
8642 imt_arg = emit_get_rgctx_method (cfg, context_used,
8643 cmethod, MONO_RGCTX_INFO_METHOD);
8648 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8649 /* test_0_multi_dim_arrays () in gshared.cs */
8650 GSHAREDVT_FAILURE (*ip);
8652 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8653 keep_this_alive = sp [0];
8655 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8656 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8658 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8659 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8661 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8663 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8665 * We pass the address to the gsharedvt trampoline in the rgctx reg
8667 MonoInst *callee = addr;
8669 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8671 GSHAREDVT_FAILURE (*ip);
8673 addr = emit_get_rgctx_sig (cfg, context_used,
8674 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8675 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8679 /* Generic sharing */
8682 * Use this if the callee is gsharedvt sharable too, since
8683 * at runtime we might find an instantiation so the call cannot
8684 * be patched (the 'no_patch' code path in mini-trampolines.c).
8686 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8687 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8688 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8689 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8690 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8691 INLINE_FAILURE ("gshared");
8693 g_assert (cfg->generic_sharing_context && cmethod);
8697 * We are compiling a call to a
8698 * generic method from shared code,
8699 * which means that we have to look up
8700 * the method in the rgctx and do an
8704 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8706 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8707 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8711 /* Indirect calls */
8713 if (call_opcode == CEE_CALL)
8714 g_assert (context_used);
8715 else if (call_opcode == CEE_CALLI)
8716 g_assert (!vtable_arg);
8718 /* FIXME: what the hell is this??? */
8719 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8720 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8722 /* Prevent inlining of methods with indirect calls */
8723 INLINE_FAILURE ("indirect call");
8725 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8730 * Instead of emitting an indirect call, emit a direct call
8731 * with the contents of the aotconst as the patch info.
8733 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8734 info_type = addr->inst_c1;
8735 info_data = addr->inst_p0;
8737 info_type = addr->inst_right->inst_c1;
8738 info_data = addr->inst_right->inst_left;
8741 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8742 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8747 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8755 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8756 MonoInst *val = sp [fsig->param_count];
8758 if (val->type == STACK_OBJ) {
8759 MonoInst *iargs [2];
8764 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8767 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8768 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8769 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8770 emit_write_barrier (cfg, addr, val);
8771 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8772 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8774 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8775 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8776 if (!cmethod->klass->element_class->valuetype && !readonly)
8777 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8778 CHECK_TYPELOAD (cmethod->klass);
8781 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8784 g_assert_not_reached ();
8791 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8795 /* Tail prefix / tail call optimization */
8797 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8798 /* FIXME: runtime generic context pointer for jumps? */
8799 /* FIXME: handle this for generic sharing eventually */
8800 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8801 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8802 supported_tail_call = TRUE;
8804 if (supported_tail_call) {
8807 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8808 INLINE_FAILURE ("tail call");
8810 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8812 if (ARCH_HAVE_OP_TAIL_CALL) {
8813 /* Handle tail calls similarly to normal calls */
8816 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8818 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8819 call->tail_call = TRUE;
8820 call->method = cmethod;
8821 call->signature = mono_method_signature (cmethod);
8824 * We implement tail calls by storing the actual arguments into the
8825 * argument variables, then emitting a CEE_JMP.
8827 for (i = 0; i < n; ++i) {
8828 /* Prevent argument from being register allocated */
8829 arg_array [i]->flags |= MONO_INST_VOLATILE;
8830 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8832 ins = (MonoInst*)call;
8833 ins->inst_p0 = cmethod;
8834 ins->inst_p1 = arg_array [0];
8835 MONO_ADD_INS (bblock, ins);
8836 link_bblock (cfg, bblock, end_bblock);
8837 start_new_bblock = 1;
8839 // FIXME: Eliminate unreachable epilogs
8842 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8843 * only reachable from this call.
8845 GET_BBLOCK (cfg, tblock, ip + 5);
8846 if (tblock == bblock || tblock->in_count == 0)
8855 * Synchronized wrappers.
8856 * It's hard to determine where to replace a method with its synchronized
8857 * wrapper without causing an infinite recursion. The current solution is
8858 * to add the synchronized wrapper in the trampolines, and to
8859 * change the called method to a dummy wrapper, and resolve that wrapper
8860 * to the real method in mono_jit_compile_method ().
8862 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8863 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8864 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8865 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8869 INLINE_FAILURE ("call");
8870 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8871 imt_arg, vtable_arg);
8874 link_bblock (cfg, bblock, end_bblock);
8875 start_new_bblock = 1;
8877 // FIXME: Eliminate unreachable epilogs
8880 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8881 * only reachable from this call.
8883 GET_BBLOCK (cfg, tblock, ip + 5);
8884 if (tblock == bblock || tblock->in_count == 0)
8891 /* End of call, INS should contain the result of the call, if any */
8893 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8896 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8901 if (keep_this_alive) {
8902 MonoInst *dummy_use;
8904 /* See mono_emit_method_call_full () */
8905 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8908 CHECK_CFG_EXCEPTION;
8912 g_assert (*ip == CEE_RET);
8916 constrained_call = NULL;
8918 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8922 if (cfg->method != method) {
8923 /* return from inlined method */
8925 * If in_count == 0, that means the ret is unreachable due to
8926 * being preceded by a throw. In that case, inline_method () will
8927 * handle setting the return value
8928 * (test case: test_0_inline_throw ()).
8930 if (return_var && cfg->cbb->in_count) {
8931 MonoType *ret_type = mono_method_signature (method)->ret;
8937 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8940 //g_assert (returnvar != -1);
8941 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8942 cfg->ret_var_set = TRUE;
8945 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8947 if (cfg->lmf_var && cfg->cbb->in_count)
8951 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8953 if (seq_points && !sym_seq_points) {
8955 * Place a seq point here too even though the IL stack is not
8956 * empty, so a step over on
8959 * will work correctly.
8961 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8962 MONO_ADD_INS (cfg->cbb, ins);
8965 g_assert (!return_var);
8969 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8972 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8975 if (!cfg->vret_addr) {
8978 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8980 EMIT_NEW_RETLOADA (cfg, ret_addr);
8982 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8983 ins->klass = mono_class_from_mono_type (ret_type);
8986 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8987 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8988 MonoInst *iargs [1];
8992 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8993 mono_arch_emit_setret (cfg, method, conv);
8995 mono_arch_emit_setret (cfg, method, *sp);
8998 mono_arch_emit_setret (cfg, method, *sp);
9003 if (sp != stack_start)
9005 MONO_INST_NEW (cfg, ins, OP_BR);
9007 ins->inst_target_bb = end_bblock;
9008 MONO_ADD_INS (bblock, ins);
9009 link_bblock (cfg, bblock, end_bblock);
9010 start_new_bblock = 1;
9014 MONO_INST_NEW (cfg, ins, OP_BR);
9016 target = ip + 1 + (signed char)(*ip);
9018 GET_BBLOCK (cfg, tblock, target);
9019 link_bblock (cfg, bblock, tblock);
9020 ins->inst_target_bb = tblock;
9021 if (sp != stack_start) {
9022 handle_stack_args (cfg, stack_start, sp - stack_start);
9024 CHECK_UNVERIFIABLE (cfg);
9026 MONO_ADD_INS (bblock, ins);
9027 start_new_bblock = 1;
9028 inline_costs += BRANCH_COST;
9042 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9044 target = ip + 1 + *(signed char*)ip;
9050 inline_costs += BRANCH_COST;
9054 MONO_INST_NEW (cfg, ins, OP_BR);
9057 target = ip + 4 + (gint32)read32(ip);
9059 GET_BBLOCK (cfg, tblock, target);
9060 link_bblock (cfg, bblock, tblock);
9061 ins->inst_target_bb = tblock;
9062 if (sp != stack_start) {
9063 handle_stack_args (cfg, stack_start, sp - stack_start);
9065 CHECK_UNVERIFIABLE (cfg);
9068 MONO_ADD_INS (bblock, ins);
9070 start_new_bblock = 1;
9071 inline_costs += BRANCH_COST;
9078 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9079 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9080 guint32 opsize = is_short ? 1 : 4;
9082 CHECK_OPSIZE (opsize);
9084 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9087 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9092 GET_BBLOCK (cfg, tblock, target);
9093 link_bblock (cfg, bblock, tblock);
9094 GET_BBLOCK (cfg, tblock, ip);
9095 link_bblock (cfg, bblock, tblock);
9097 if (sp != stack_start) {
9098 handle_stack_args (cfg, stack_start, sp - stack_start);
9099 CHECK_UNVERIFIABLE (cfg);
9102 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9103 cmp->sreg1 = sp [0]->dreg;
9104 type_from_op (cmp, sp [0], NULL);
9107 #if SIZEOF_REGISTER == 4
9108 if (cmp->opcode == OP_LCOMPARE_IMM) {
9109 /* Convert it to OP_LCOMPARE */
9110 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9111 ins->type = STACK_I8;
9112 ins->dreg = alloc_dreg (cfg, STACK_I8);
9114 MONO_ADD_INS (bblock, ins);
9115 cmp->opcode = OP_LCOMPARE;
9116 cmp->sreg2 = ins->dreg;
9119 MONO_ADD_INS (bblock, cmp);
9121 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9122 type_from_op (ins, sp [0], NULL);
9123 MONO_ADD_INS (bblock, ins);
9124 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9125 GET_BBLOCK (cfg, tblock, target);
9126 ins->inst_true_bb = tblock;
9127 GET_BBLOCK (cfg, tblock, ip);
9128 ins->inst_false_bb = tblock;
9129 start_new_bblock = 2;
9132 inline_costs += BRANCH_COST;
9147 MONO_INST_NEW (cfg, ins, *ip);
9149 target = ip + 4 + (gint32)read32(ip);
9155 inline_costs += BRANCH_COST;
9159 MonoBasicBlock **targets;
9160 MonoBasicBlock *default_bblock;
9161 MonoJumpInfoBBTable *table;
9162 int offset_reg = alloc_preg (cfg);
9163 int target_reg = alloc_preg (cfg);
9164 int table_reg = alloc_preg (cfg);
9165 int sum_reg = alloc_preg (cfg);
9166 gboolean use_op_switch;
9170 n = read32 (ip + 1);
9173 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9177 CHECK_OPSIZE (n * sizeof (guint32));
9178 target = ip + n * sizeof (guint32);
9180 GET_BBLOCK (cfg, default_bblock, target);
9181 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9183 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9184 for (i = 0; i < n; ++i) {
9185 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9186 targets [i] = tblock;
9187 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9191 if (sp != stack_start) {
9193 * Link the current bb with the targets as well, so handle_stack_args
9194 * will set their in_stack correctly.
9196 link_bblock (cfg, bblock, default_bblock);
9197 for (i = 0; i < n; ++i)
9198 link_bblock (cfg, bblock, targets [i]);
9200 handle_stack_args (cfg, stack_start, sp - stack_start);
9202 CHECK_UNVERIFIABLE (cfg);
9205 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9206 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9209 for (i = 0; i < n; ++i)
9210 link_bblock (cfg, bblock, targets [i]);
9212 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9213 table->table = targets;
9214 table->table_size = n;
9216 use_op_switch = FALSE;
9218 /* ARM implements SWITCH statements differently */
9219 /* FIXME: Make it use the generic implementation */
9220 if (!cfg->compile_aot)
9221 use_op_switch = TRUE;
9224 if (COMPILE_LLVM (cfg))
9225 use_op_switch = TRUE;
9227 cfg->cbb->has_jump_table = 1;
9229 if (use_op_switch) {
9230 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9231 ins->sreg1 = src1->dreg;
9232 ins->inst_p0 = table;
9233 ins->inst_many_bb = targets;
9234 ins->klass = GUINT_TO_POINTER (n);
9235 MONO_ADD_INS (cfg->cbb, ins);
9237 if (sizeof (gpointer) == 8)
9238 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9242 #if SIZEOF_REGISTER == 8
9243 /* The upper word might not be zero, and we add it to a 64 bit address later */
9244 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9247 if (cfg->compile_aot) {
9248 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9250 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9251 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9252 ins->inst_p0 = table;
9253 ins->dreg = table_reg;
9254 MONO_ADD_INS (cfg->cbb, ins);
9257 /* FIXME: Use load_memindex */
9258 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9259 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9260 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9262 start_new_bblock = 1;
9263 inline_costs += (BRANCH_COST * 2);
9283 dreg = alloc_freg (cfg);
9286 dreg = alloc_lreg (cfg);
9289 dreg = alloc_ireg_ref (cfg);
9292 dreg = alloc_preg (cfg);
9295 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9296 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9297 ins->flags |= ins_flag;
9298 MONO_ADD_INS (bblock, ins);
9300 if (ins_flag & MONO_INST_VOLATILE) {
9301 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9302 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9318 if (ins_flag & MONO_INST_VOLATILE) {
9319 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9320 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9323 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9324 ins->flags |= ins_flag;
9327 MONO_ADD_INS (bblock, ins);
9329 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9330 emit_write_barrier (cfg, sp [0], sp [1]);
9339 MONO_INST_NEW (cfg, ins, (*ip));
9341 ins->sreg1 = sp [0]->dreg;
9342 ins->sreg2 = sp [1]->dreg;
9343 type_from_op (ins, sp [0], sp [1]);
9345 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9347 /* Use the immediate opcodes if possible */
9348 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9349 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9350 if (imm_opcode != -1) {
9351 ins->opcode = imm_opcode;
9352 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9355 NULLIFY_INS (sp [1]);
9359 MONO_ADD_INS ((cfg)->cbb, (ins));
9361 *sp++ = mono_decompose_opcode (cfg, ins);
9378 MONO_INST_NEW (cfg, ins, (*ip));
9380 ins->sreg1 = sp [0]->dreg;
9381 ins->sreg2 = sp [1]->dreg;
9382 type_from_op (ins, sp [0], sp [1]);
9384 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9385 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9387 /* FIXME: Pass opcode to is_inst_imm */
9389 /* Use the immediate opcodes if possible */
9390 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9393 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9394 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9395 /* Keep emulated opcodes which are optimized away later */
9396 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9397 imm_opcode = mono_op_to_op_imm (ins->opcode);
9400 if (imm_opcode != -1) {
9401 ins->opcode = imm_opcode;
9402 if (sp [1]->opcode == OP_I8CONST) {
9403 #if SIZEOF_REGISTER == 8
9404 ins->inst_imm = sp [1]->inst_l;
9406 ins->inst_ls_word = sp [1]->inst_ls_word;
9407 ins->inst_ms_word = sp [1]->inst_ms_word;
9411 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9414 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9415 if (sp [1]->next == NULL)
9416 NULLIFY_INS (sp [1]);
9419 MONO_ADD_INS ((cfg)->cbb, (ins));
9421 *sp++ = mono_decompose_opcode (cfg, ins);
9434 case CEE_CONV_OVF_I8:
9435 case CEE_CONV_OVF_U8:
9439 /* Special case this earlier so we have long constants in the IR */
9440 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9441 int data = sp [-1]->inst_c0;
9442 sp [-1]->opcode = OP_I8CONST;
9443 sp [-1]->type = STACK_I8;
9444 #if SIZEOF_REGISTER == 8
9445 if ((*ip) == CEE_CONV_U8)
9446 sp [-1]->inst_c0 = (guint32)data;
9448 sp [-1]->inst_c0 = data;
9450 sp [-1]->inst_ls_word = data;
9451 if ((*ip) == CEE_CONV_U8)
9452 sp [-1]->inst_ms_word = 0;
9454 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9456 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9463 case CEE_CONV_OVF_I4:
9464 case CEE_CONV_OVF_I1:
9465 case CEE_CONV_OVF_I2:
9466 case CEE_CONV_OVF_I:
9467 case CEE_CONV_OVF_U:
9470 if (sp [-1]->type == STACK_R8) {
9471 ADD_UNOP (CEE_CONV_OVF_I8);
9478 case CEE_CONV_OVF_U1:
9479 case CEE_CONV_OVF_U2:
9480 case CEE_CONV_OVF_U4:
9483 if (sp [-1]->type == STACK_R8) {
9484 ADD_UNOP (CEE_CONV_OVF_U8);
9491 case CEE_CONV_OVF_I1_UN:
9492 case CEE_CONV_OVF_I2_UN:
9493 case CEE_CONV_OVF_I4_UN:
9494 case CEE_CONV_OVF_I8_UN:
9495 case CEE_CONV_OVF_U1_UN:
9496 case CEE_CONV_OVF_U2_UN:
9497 case CEE_CONV_OVF_U4_UN:
9498 case CEE_CONV_OVF_U8_UN:
9499 case CEE_CONV_OVF_I_UN:
9500 case CEE_CONV_OVF_U_UN:
9507 CHECK_CFG_EXCEPTION;
9511 case CEE_ADD_OVF_UN:
9513 case CEE_MUL_OVF_UN:
9515 case CEE_SUB_OVF_UN:
9521 GSHAREDVT_FAILURE (*ip);
9524 token = read32 (ip + 1);
9525 klass = mini_get_class (method, token, generic_context);
9526 CHECK_TYPELOAD (klass);
9528 if (generic_class_is_reference_type (cfg, klass)) {
9529 MonoInst *store, *load;
9530 int dreg = alloc_ireg_ref (cfg);
9532 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9533 load->flags |= ins_flag;
9534 MONO_ADD_INS (cfg->cbb, load);
9536 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9537 store->flags |= ins_flag;
9538 MONO_ADD_INS (cfg->cbb, store);
9540 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9541 emit_write_barrier (cfg, sp [0], sp [1]);
9543 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9555 token = read32 (ip + 1);
9556 klass = mini_get_class (method, token, generic_context);
9557 CHECK_TYPELOAD (klass);
9559 /* Optimize the common ldobj+stloc combination */
9569 loc_index = ip [5] - CEE_STLOC_0;
9576 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9577 CHECK_LOCAL (loc_index);
9579 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9580 ins->dreg = cfg->locals [loc_index]->dreg;
9581 ins->flags |= ins_flag;
9584 if (ins_flag & MONO_INST_VOLATILE) {
9585 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9586 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9592 /* Optimize the ldobj+stobj combination */
9593 /* The reference case ends up being a load+store anyway */
9594 /* Skip this if the operation is volatile. */
9595 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9600 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9607 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9608 ins->flags |= ins_flag;
9611 if (ins_flag & MONO_INST_VOLATILE) {
9612 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9613 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9622 CHECK_STACK_OVF (1);
9624 n = read32 (ip + 1);
9626 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9627 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9628 ins->type = STACK_OBJ;
9631 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9632 MonoInst *iargs [1];
9633 char *str = mono_method_get_wrapper_data (method, n);
9635 if (cfg->compile_aot)
9636 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9638 EMIT_NEW_PCONST (cfg, iargs [0], str);
9639 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9641 if (cfg->opt & MONO_OPT_SHARED) {
9642 MonoInst *iargs [3];
9644 if (cfg->compile_aot) {
9645 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9647 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9648 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9649 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9650 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9651 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9653 if (bblock->out_of_line) {
9654 MonoInst *iargs [2];
9656 if (image == mono_defaults.corlib) {
9658 * Avoid relocations in AOT and save some space by using a
9659 * version of helper_ldstr specialized to mscorlib.
9661 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9662 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9664 /* Avoid creating the string object */
9665 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9666 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9667 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9671 if (cfg->compile_aot) {
9672 NEW_LDSTRCONST (cfg, ins, image, n);
9674 MONO_ADD_INS (bblock, ins);
9677 NEW_PCONST (cfg, ins, NULL);
9678 ins->type = STACK_OBJ;
9679 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9681 OUT_OF_MEMORY_FAILURE;
9684 MONO_ADD_INS (bblock, ins);
9693 MonoInst *iargs [2];
9694 MonoMethodSignature *fsig;
9697 MonoInst *vtable_arg = NULL;
9700 token = read32 (ip + 1);
9701 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9702 if (!cmethod || mono_loader_get_last_error ())
9704 fsig = mono_method_get_signature_checked (cmethod, image, token, NULL, &cfg->error);
9707 mono_save_token_info (cfg, image, token, cmethod);
9709 if (!mono_class_init (cmethod->klass))
9710 TYPE_LOAD_ERROR (cmethod->klass);
9712 context_used = mini_method_check_context_used (cfg, cmethod);
9714 if (mono_security_cas_enabled ()) {
9715 if (check_linkdemand (cfg, method, cmethod))
9716 INLINE_FAILURE ("linkdemand");
9717 CHECK_CFG_EXCEPTION;
9718 } else if (mono_security_core_clr_enabled ()) {
9719 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9722 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9723 emit_generic_class_init (cfg, cmethod->klass);
9724 CHECK_TYPELOAD (cmethod->klass);
9728 if (cfg->gsharedvt) {
9729 if (mini_is_gsharedvt_variable_signature (sig))
9730 GSHAREDVT_FAILURE (*ip);
9734 n = fsig->param_count;
9738 * Generate smaller code for the common newobj <exception> instruction in
9739 * argument checking code.
9741 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9742 is_exception_class (cmethod->klass) && n <= 2 &&
9743 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9744 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9745 MonoInst *iargs [3];
9749 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9752 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9756 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9761 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9764 g_assert_not_reached ();
9772 /* move the args to allow room for 'this' in the first position */
9778 /* check_call_signature () requires sp[0] to be set */
9779 this_ins.type = STACK_OBJ;
9781 if (check_call_signature (cfg, fsig, sp))
9786 if (mini_class_is_system_array (cmethod->klass)) {
9787 *sp = emit_get_rgctx_method (cfg, context_used,
9788 cmethod, MONO_RGCTX_INFO_METHOD);
9790 /* Avoid varargs in the common case */
9791 if (fsig->param_count == 1)
9792 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9793 else if (fsig->param_count == 2)
9794 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9795 else if (fsig->param_count == 3)
9796 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9797 else if (fsig->param_count == 4)
9798 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9800 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9801 } else if (cmethod->string_ctor) {
9802 g_assert (!context_used);
9803 g_assert (!vtable_arg);
9804 /* we simply pass a null pointer */
9805 EMIT_NEW_PCONST (cfg, *sp, NULL);
9806 /* now call the string ctor */
9807 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9809 if (cmethod->klass->valuetype) {
9810 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9811 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9812 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9817 * The code generated by mini_emit_virtual_call () expects
9818 * iargs [0] to be a boxed instance, but luckily the vcall
9819 * will be transformed into a normal call there.
9821 } else if (context_used) {
9822 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9825 MonoVTable *vtable = NULL;
9827 if (!cfg->compile_aot)
9828 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9829 CHECK_TYPELOAD (cmethod->klass);
9832 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9833 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9834 * As a workaround, we call class cctors before allocating objects.
9836 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9837 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9838 if (cfg->verbose_level > 2)
9839 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9840 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9843 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9846 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9849 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9851 /* Now call the actual ctor */
9852 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
9853 CHECK_CFG_EXCEPTION;
9856 if (alloc == NULL) {
9858 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9859 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9867 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
9868 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9875 token = read32 (ip + 1);
9876 klass = mini_get_class (method, token, generic_context);
9877 CHECK_TYPELOAD (klass);
9878 if (sp [0]->type != STACK_OBJ)
9881 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
9882 CHECK_CFG_EXCEPTION;
9891 token = read32 (ip + 1);
9892 klass = mini_get_class (method, token, generic_context);
9893 CHECK_TYPELOAD (klass);
9894 if (sp [0]->type != STACK_OBJ)
9897 context_used = mini_class_check_context_used (cfg, klass);
9899 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9900 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9907 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9910 if (cfg->compile_aot)
9911 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9913 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9915 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9918 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9919 MonoMethod *mono_isinst;
9920 MonoInst *iargs [1];
9923 mono_isinst = mono_marshal_get_isinst (klass);
9926 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9927 iargs, ip, cfg->real_offset, TRUE, &bblock);
9928 CHECK_CFG_EXCEPTION;
9929 g_assert (costs > 0);
9932 cfg->real_offset += 5;
9936 inline_costs += costs;
9939 ins = handle_isinst (cfg, klass, *sp, context_used);
9940 CHECK_CFG_EXCEPTION;
9947 case CEE_UNBOX_ANY: {
9948 MonoInst *res, *addr;
9953 token = read32 (ip + 1);
9954 klass = mini_get_class (method, token, generic_context);
9955 CHECK_TYPELOAD (klass);
9957 mono_save_token_info (cfg, image, token, klass);
9959 context_used = mini_class_check_context_used (cfg, klass);
9961 if (mini_is_gsharedvt_klass (cfg, klass)) {
9962 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9964 } else if (generic_class_is_reference_type (cfg, klass)) {
9965 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
9966 CHECK_CFG_EXCEPTION;
9967 } else if (mono_class_is_nullable (klass)) {
9968 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
9970 addr = handle_unbox (cfg, klass, sp, context_used);
9972 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9988 token = read32 (ip + 1);
9989 klass = mini_get_class (method, token, generic_context);
9990 CHECK_TYPELOAD (klass);
9992 mono_save_token_info (cfg, image, token, klass);
9994 context_used = mini_class_check_context_used (cfg, klass);
9996 if (generic_class_is_reference_type (cfg, klass)) {
10002 if (klass == mono_defaults.void_class)
10004 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10006 /* frequent check in generic code: box (struct), brtrue */
10008 // FIXME: LLVM can't handle the inconsistent bb linking
10009 if (!mono_class_is_nullable (klass) &&
10010 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10011 (ip [5] == CEE_BRTRUE ||
10012 ip [5] == CEE_BRTRUE_S ||
10013 ip [5] == CEE_BRFALSE ||
10014 ip [5] == CEE_BRFALSE_S)) {
10015 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10017 MonoBasicBlock *true_bb, *false_bb;
10021 if (cfg->verbose_level > 3) {
10022 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10023 printf ("<box+brtrue opt>\n");
10028 case CEE_BRFALSE_S:
10031 target = ip + 1 + (signed char)(*ip);
10038 target = ip + 4 + (gint)(read32 (ip));
10042 g_assert_not_reached ();
10046 * We need to link both bblocks, since it is needed for handling stack
10047 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10048 * Branching to only one of them would lead to inconsistencies, so
10049 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10051 GET_BBLOCK (cfg, true_bb, target);
10052 GET_BBLOCK (cfg, false_bb, ip);
10054 mono_link_bblock (cfg, cfg->cbb, true_bb);
10055 mono_link_bblock (cfg, cfg->cbb, false_bb);
10057 if (sp != stack_start) {
10058 handle_stack_args (cfg, stack_start, sp - stack_start);
10060 CHECK_UNVERIFIABLE (cfg);
10063 if (COMPILE_LLVM (cfg)) {
10064 dreg = alloc_ireg (cfg);
10065 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10068 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10070 /* The JIT can't eliminate the iconst+compare */
10071 MONO_INST_NEW (cfg, ins, OP_BR);
10072 ins->inst_target_bb = is_true ? true_bb : false_bb;
10073 MONO_ADD_INS (cfg->cbb, ins);
10076 start_new_bblock = 1;
10080 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10082 CHECK_CFG_EXCEPTION;
10091 token = read32 (ip + 1);
10092 klass = mini_get_class (method, token, generic_context);
10093 CHECK_TYPELOAD (klass);
10095 mono_save_token_info (cfg, image, token, klass);
10097 context_used = mini_class_check_context_used (cfg, klass);
10099 if (mono_class_is_nullable (klass)) {
10102 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10103 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10107 ins = handle_unbox (cfg, klass, sp, context_used);
10120 MonoClassField *field;
10121 #ifndef DISABLE_REMOTING
10125 gboolean is_instance;
10127 gpointer addr = NULL;
10128 gboolean is_special_static;
10130 MonoInst *store_val = NULL;
10131 MonoInst *thread_ins;
10134 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10136 if (op == CEE_STFLD) {
10139 store_val = sp [1];
10144 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10146 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10149 if (op == CEE_STSFLD) {
10152 store_val = sp [0];
10157 token = read32 (ip + 1);
10158 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10159 field = mono_method_get_wrapper_data (method, token);
10160 klass = field->parent;
10163 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10166 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10167 FIELD_ACCESS_FAILURE (method, field);
10168 mono_class_init (klass);
10170 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10173 /* if the class is Critical then transparent code cannot access its fields */
10174 if (!is_instance && mono_security_core_clr_enabled ())
10175 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10177 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10178 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10179 if (mono_security_core_clr_enabled ())
10180 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10184 * LDFLD etc. is usable on static fields as well, so convert those cases to
10187 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10199 g_assert_not_reached ();
10201 is_instance = FALSE;
10204 context_used = mini_class_check_context_used (cfg, klass);
10206 /* INSTANCE CASE */
10208 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10209 if (op == CEE_STFLD) {
10210 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10212 #ifndef DISABLE_REMOTING
10213 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10214 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10215 MonoInst *iargs [5];
10217 GSHAREDVT_FAILURE (op);
10219 iargs [0] = sp [0];
10220 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10221 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10222 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10224 iargs [4] = sp [1];
10226 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10227 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10228 iargs, ip, cfg->real_offset, TRUE, &bblock);
10229 CHECK_CFG_EXCEPTION;
10230 g_assert (costs > 0);
10232 cfg->real_offset += 5;
10234 inline_costs += costs;
10236 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10243 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10245 if (mini_is_gsharedvt_klass (cfg, klass)) {
10246 MonoInst *offset_ins;
10248 context_used = mini_class_check_context_used (cfg, klass);
10250 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10251 dreg = alloc_ireg_mp (cfg);
10252 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10253 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10254 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10256 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10258 if (sp [0]->opcode != OP_LDADDR)
10259 store->flags |= MONO_INST_FAULT;
10261 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10262 /* insert call to write barrier */
10266 dreg = alloc_ireg_mp (cfg);
10267 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10268 emit_write_barrier (cfg, ptr, sp [1]);
10271 store->flags |= ins_flag;
10278 #ifndef DISABLE_REMOTING
10279 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10280 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10281 MonoInst *iargs [4];
10283 GSHAREDVT_FAILURE (op);
10285 iargs [0] = sp [0];
10286 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10287 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10288 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10289 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10290 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10291 iargs, ip, cfg->real_offset, TRUE, &bblock);
10292 CHECK_CFG_EXCEPTION;
10293 g_assert (costs > 0);
10295 cfg->real_offset += 5;
10299 inline_costs += costs;
10301 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10307 if (sp [0]->type == STACK_VTYPE) {
10310 /* Have to compute the address of the variable */
10312 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10314 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10316 g_assert (var->klass == klass);
10318 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10322 if (op == CEE_LDFLDA) {
10323 if (is_magic_tls_access (field)) {
10324 GSHAREDVT_FAILURE (*ip);
10326 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10328 if (sp [0]->type == STACK_OBJ) {
10329 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10330 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10333 dreg = alloc_ireg_mp (cfg);
10335 if (mini_is_gsharedvt_klass (cfg, klass)) {
10336 MonoInst *offset_ins;
10338 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10339 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10341 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10343 ins->klass = mono_class_from_mono_type (field->type);
10344 ins->type = STACK_MP;
10350 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10352 if (mini_is_gsharedvt_klass (cfg, klass)) {
10353 MonoInst *offset_ins;
10355 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10356 dreg = alloc_ireg_mp (cfg);
10357 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10358 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10360 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10362 load->flags |= ins_flag;
10363 if (sp [0]->opcode != OP_LDADDR)
10364 load->flags |= MONO_INST_FAULT;
10378 * We can only support shared generic static
10379 * field access on architectures where the
10380 * trampoline code has been extended to handle
10381 * the generic class init.
10383 #ifndef MONO_ARCH_VTABLE_REG
10384 GENERIC_SHARING_FAILURE (op);
10387 context_used = mini_class_check_context_used (cfg, klass);
10389 ftype = mono_field_get_type (field);
10391 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10394 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10395 * to be called here.
10397 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10398 mono_class_vtable (cfg->domain, klass);
10399 CHECK_TYPELOAD (klass);
10401 mono_domain_lock (cfg->domain);
10402 if (cfg->domain->special_static_fields)
10403 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10404 mono_domain_unlock (cfg->domain);
10406 is_special_static = mono_class_field_is_special_static (field);
10408 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10409 thread_ins = mono_get_thread_intrinsic (cfg);
10413 /* Generate IR to compute the field address */
10414 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10416 * Fast access to TLS data
10417 * Inline version of get_thread_static_data () in
10421 int idx, static_data_reg, array_reg, dreg;
10423 GSHAREDVT_FAILURE (op);
10425 // offset &= 0x7fffffff;
10426 // idx = (offset >> 24) - 1;
10427 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10428 MONO_ADD_INS (cfg->cbb, thread_ins);
10429 static_data_reg = alloc_ireg (cfg);
10430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10432 if (cfg->compile_aot) {
10433 int offset_reg, offset2_reg, idx_reg;
10435 /* For TLS variables, this will return the TLS offset */
10436 EMIT_NEW_SFLDACONST (cfg, ins, field);
10437 offset_reg = ins->dreg;
10438 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10439 idx_reg = alloc_ireg (cfg);
10440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10443 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10444 array_reg = alloc_ireg (cfg);
10445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10446 offset2_reg = alloc_ireg (cfg);
10447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10448 dreg = alloc_ireg (cfg);
10449 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10451 offset = (gsize)addr & 0x7fffffff;
10452 idx = (offset >> 24) - 1;
10454 array_reg = alloc_ireg (cfg);
10455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10456 dreg = alloc_ireg (cfg);
10457 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10459 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10460 (cfg->compile_aot && is_special_static) ||
10461 (context_used && is_special_static)) {
10462 MonoInst *iargs [2];
10464 g_assert (field->parent);
10465 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10466 if (context_used) {
10467 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10468 field, MONO_RGCTX_INFO_CLASS_FIELD);
10470 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10472 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10473 } else if (context_used) {
10474 MonoInst *static_data;
10477 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10478 method->klass->name_space, method->klass->name, method->name,
10479 depth, field->offset);
10482 if (mono_class_needs_cctor_run (klass, method))
10483 emit_generic_class_init (cfg, klass);
10486 * The pointer we're computing here is
10488 * super_info.static_data + field->offset
10490 static_data = emit_get_rgctx_klass (cfg, context_used,
10491 klass, MONO_RGCTX_INFO_STATIC_DATA);
10493 if (mini_is_gsharedvt_klass (cfg, klass)) {
10494 MonoInst *offset_ins;
10496 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10497 dreg = alloc_ireg_mp (cfg);
10498 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10499 } else if (field->offset == 0) {
10502 int addr_reg = mono_alloc_preg (cfg);
10503 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10505 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10506 MonoInst *iargs [2];
10508 g_assert (field->parent);
10509 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10510 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10511 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10513 MonoVTable *vtable = NULL;
10515 if (!cfg->compile_aot)
10516 vtable = mono_class_vtable (cfg->domain, klass);
10517 CHECK_TYPELOAD (klass);
10520 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10521 if (!(g_slist_find (class_inits, klass))) {
10522 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10523 if (cfg->verbose_level > 2)
10524 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10525 class_inits = g_slist_prepend (class_inits, klass);
10528 if (cfg->run_cctors) {
10530 /* This makes so that inline cannot trigger */
10531 /* .cctors: too many apps depend on them */
10532 /* running with a specific order... */
10534 if (! vtable->initialized)
10535 INLINE_FAILURE ("class init");
10536 ex = mono_runtime_class_init_full (vtable, FALSE);
10538 set_exception_object (cfg, ex);
10539 goto exception_exit;
10543 if (cfg->compile_aot)
10544 EMIT_NEW_SFLDACONST (cfg, ins, field);
10547 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10549 EMIT_NEW_PCONST (cfg, ins, addr);
10552 MonoInst *iargs [1];
10553 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10554 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10558 /* Generate IR to do the actual load/store operation */
10560 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10561 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10562 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10565 if (op == CEE_LDSFLDA) {
10566 ins->klass = mono_class_from_mono_type (ftype);
10567 ins->type = STACK_PTR;
10569 } else if (op == CEE_STSFLD) {
10572 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10573 store->flags |= ins_flag;
10575 gboolean is_const = FALSE;
10576 MonoVTable *vtable = NULL;
10577 gpointer addr = NULL;
10579 if (!context_used) {
10580 vtable = mono_class_vtable (cfg->domain, klass);
10581 CHECK_TYPELOAD (klass);
10583 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10584 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10585 int ro_type = ftype->type;
10587 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10588 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10589 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10592 GSHAREDVT_FAILURE (op);
10594 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10597 case MONO_TYPE_BOOLEAN:
10599 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10603 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10606 case MONO_TYPE_CHAR:
10608 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10612 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10617 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10621 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10626 case MONO_TYPE_PTR:
10627 case MONO_TYPE_FNPTR:
10628 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10629 type_to_eval_stack_type ((cfg), field->type, *sp);
10632 case MONO_TYPE_STRING:
10633 case MONO_TYPE_OBJECT:
10634 case MONO_TYPE_CLASS:
10635 case MONO_TYPE_SZARRAY:
10636 case MONO_TYPE_ARRAY:
10637 if (!mono_gc_is_moving ()) {
10638 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10639 type_to_eval_stack_type ((cfg), field->type, *sp);
10647 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10652 case MONO_TYPE_VALUETYPE:
10662 CHECK_STACK_OVF (1);
10664 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10665 load->flags |= ins_flag;
10671 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10672 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10673 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10684 token = read32 (ip + 1);
10685 klass = mini_get_class (method, token, generic_context);
10686 CHECK_TYPELOAD (klass);
10687 if (ins_flag & MONO_INST_VOLATILE) {
10688 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10689 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10691 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10692 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10693 ins->flags |= ins_flag;
10694 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10695 generic_class_is_reference_type (cfg, klass)) {
10696 /* insert call to write barrier */
10697 emit_write_barrier (cfg, sp [0], sp [1]);
10709 const char *data_ptr;
10711 guint32 field_token;
10717 token = read32 (ip + 1);
10719 klass = mini_get_class (method, token, generic_context);
10720 CHECK_TYPELOAD (klass);
10722 context_used = mini_class_check_context_used (cfg, klass);
10724 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10725 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10726 ins->sreg1 = sp [0]->dreg;
10727 ins->type = STACK_I4;
10728 ins->dreg = alloc_ireg (cfg);
10729 MONO_ADD_INS (cfg->cbb, ins);
10730 *sp = mono_decompose_opcode (cfg, ins);
10733 if (context_used) {
10734 MonoInst *args [3];
10735 MonoClass *array_class = mono_array_class_get (klass, 1);
10736 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10738 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10741 args [0] = emit_get_rgctx_klass (cfg, context_used,
10742 array_class, MONO_RGCTX_INFO_VTABLE);
10747 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10749 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10751 if (cfg->opt & MONO_OPT_SHARED) {
10752 /* Decompose now to avoid problems with references to the domainvar */
10753 MonoInst *iargs [3];
10755 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10756 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10757 iargs [2] = sp [0];
10759 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10761 /* Decompose later since it is needed by abcrem */
10762 MonoClass *array_type = mono_array_class_get (klass, 1);
10763 mono_class_vtable (cfg->domain, array_type);
10764 CHECK_TYPELOAD (array_type);
10766 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10767 ins->dreg = alloc_ireg_ref (cfg);
10768 ins->sreg1 = sp [0]->dreg;
10769 ins->inst_newa_class = klass;
10770 ins->type = STACK_OBJ;
10771 ins->klass = array_type;
10772 MONO_ADD_INS (cfg->cbb, ins);
10773 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10774 cfg->cbb->has_array_access = TRUE;
10776 /* Needed so mono_emit_load_get_addr () gets called */
10777 mono_get_got_var (cfg);
10787 * we inline/optimize the initialization sequence if possible.
10788 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10789 * for small sizes open code the memcpy
10790 * ensure the rva field is big enough
10792 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10793 MonoMethod *memcpy_method = get_memcpy_method ();
10794 MonoInst *iargs [3];
10795 int add_reg = alloc_ireg_mp (cfg);
10797 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10798 if (cfg->compile_aot) {
10799 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10801 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10803 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10804 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10813 if (sp [0]->type != STACK_OBJ)
10816 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10817 ins->dreg = alloc_preg (cfg);
10818 ins->sreg1 = sp [0]->dreg;
10819 ins->type = STACK_I4;
10820 /* This flag will be inherited by the decomposition */
10821 ins->flags |= MONO_INST_FAULT;
10822 MONO_ADD_INS (cfg->cbb, ins);
10823 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10824 cfg->cbb->has_array_access = TRUE;
10832 if (sp [0]->type != STACK_OBJ)
10835 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10837 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10838 CHECK_TYPELOAD (klass);
10839 /* we need to make sure that this array is exactly the type it needs
10840 * to be for correctness. the wrappers are lax with their usage
10841 * so we need to ignore them here
10843 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10844 MonoClass *array_class = mono_array_class_get (klass, 1);
10845 mini_emit_check_array_type (cfg, sp [0], array_class);
10846 CHECK_TYPELOAD (array_class);
10850 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10855 case CEE_LDELEM_I1:
10856 case CEE_LDELEM_U1:
10857 case CEE_LDELEM_I2:
10858 case CEE_LDELEM_U2:
10859 case CEE_LDELEM_I4:
10860 case CEE_LDELEM_U4:
10861 case CEE_LDELEM_I8:
10863 case CEE_LDELEM_R4:
10864 case CEE_LDELEM_R8:
10865 case CEE_LDELEM_REF: {
10871 if (*ip == CEE_LDELEM) {
10873 token = read32 (ip + 1);
10874 klass = mini_get_class (method, token, generic_context);
10875 CHECK_TYPELOAD (klass);
10876 mono_class_init (klass);
10879 klass = array_access_to_klass (*ip);
10881 if (sp [0]->type != STACK_OBJ)
10884 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10886 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10887 // FIXME-VT: OP_ICONST optimization
10888 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10889 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10890 ins->opcode = OP_LOADV_MEMBASE;
10891 } else if (sp [1]->opcode == OP_ICONST) {
10892 int array_reg = sp [0]->dreg;
10893 int index_reg = sp [1]->dreg;
10894 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
10896 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10897 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10899 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10900 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10903 if (*ip == CEE_LDELEM)
10910 case CEE_STELEM_I1:
10911 case CEE_STELEM_I2:
10912 case CEE_STELEM_I4:
10913 case CEE_STELEM_I8:
10914 case CEE_STELEM_R4:
10915 case CEE_STELEM_R8:
10916 case CEE_STELEM_REF:
10921 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10923 if (*ip == CEE_STELEM) {
10925 token = read32 (ip + 1);
10926 klass = mini_get_class (method, token, generic_context);
10927 CHECK_TYPELOAD (klass);
10928 mono_class_init (klass);
10931 klass = array_access_to_klass (*ip);
10933 if (sp [0]->type != STACK_OBJ)
10936 emit_array_store (cfg, klass, sp, TRUE);
10938 if (*ip == CEE_STELEM)
10945 case CEE_CKFINITE: {
10949 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10950 ins->sreg1 = sp [0]->dreg;
10951 ins->dreg = alloc_freg (cfg);
10952 ins->type = STACK_R8;
10953 MONO_ADD_INS (bblock, ins);
10955 *sp++ = mono_decompose_opcode (cfg, ins);
10960 case CEE_REFANYVAL: {
10961 MonoInst *src_var, *src;
10963 int klass_reg = alloc_preg (cfg);
10964 int dreg = alloc_preg (cfg);
10966 GSHAREDVT_FAILURE (*ip);
10969 MONO_INST_NEW (cfg, ins, *ip);
10972 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10973 CHECK_TYPELOAD (klass);
10975 context_used = mini_class_check_context_used (cfg, klass);
10978 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10980 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10981 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10982 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
10984 if (context_used) {
10985 MonoInst *klass_ins;
10987 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10988 klass, MONO_RGCTX_INFO_KLASS);
10991 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10992 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10994 mini_emit_class_check (cfg, klass_reg, klass);
10996 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
10997 ins->type = STACK_MP;
11002 case CEE_MKREFANY: {
11003 MonoInst *loc, *addr;
11005 GSHAREDVT_FAILURE (*ip);
11008 MONO_INST_NEW (cfg, ins, *ip);
11011 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11012 CHECK_TYPELOAD (klass);
11014 context_used = mini_class_check_context_used (cfg, klass);
11016 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11017 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11019 if (context_used) {
11020 MonoInst *const_ins;
11021 int type_reg = alloc_preg (cfg);
11023 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11024 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11025 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11026 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11027 } else if (cfg->compile_aot) {
11028 int const_reg = alloc_preg (cfg);
11029 int type_reg = alloc_preg (cfg);
11031 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11032 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11033 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11034 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11036 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11037 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11039 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11041 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11042 ins->type = STACK_VTYPE;
11043 ins->klass = mono_defaults.typed_reference_class;
11048 case CEE_LDTOKEN: {
11050 MonoClass *handle_class;
11052 CHECK_STACK_OVF (1);
11055 n = read32 (ip + 1);
11057 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11058 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11059 handle = mono_method_get_wrapper_data (method, n);
11060 handle_class = mono_method_get_wrapper_data (method, n + 1);
11061 if (handle_class == mono_defaults.typehandle_class)
11062 handle = &((MonoClass*)handle)->byval_arg;
11065 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11070 mono_class_init (handle_class);
11071 if (cfg->generic_sharing_context) {
11072 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11073 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11074 /* This case handles ldtoken
11075 of an open type, like for
11078 } else if (handle_class == mono_defaults.typehandle_class) {
11079 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11080 } else if (handle_class == mono_defaults.fieldhandle_class)
11081 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11082 else if (handle_class == mono_defaults.methodhandle_class)
11083 context_used = mini_method_check_context_used (cfg, handle);
11085 g_assert_not_reached ();
11088 if ((cfg->opt & MONO_OPT_SHARED) &&
11089 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11090 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11091 MonoInst *addr, *vtvar, *iargs [3];
11092 int method_context_used;
11094 method_context_used = mini_method_check_context_used (cfg, method);
11096 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11098 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11099 EMIT_NEW_ICONST (cfg, iargs [1], n);
11100 if (method_context_used) {
11101 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11102 method, MONO_RGCTX_INFO_METHOD);
11103 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11105 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11106 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11108 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11110 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11112 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11114 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11115 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11116 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11117 (cmethod->klass == mono_defaults.systemtype_class) &&
11118 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11119 MonoClass *tclass = mono_class_from_mono_type (handle);
11121 mono_class_init (tclass);
11122 if (context_used) {
11123 ins = emit_get_rgctx_klass (cfg, context_used,
11124 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11125 } else if (cfg->compile_aot) {
11126 if (method->wrapper_type) {
11127 mono_error_init (&error); //got to do it since there are multiple conditionals below
11128 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11129 /* Special case for static synchronized wrappers */
11130 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11132 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11133 /* FIXME: n is not a normal token */
11135 EMIT_NEW_PCONST (cfg, ins, NULL);
11138 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11141 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11143 ins->type = STACK_OBJ;
11144 ins->klass = cmethod->klass;
11147 MonoInst *addr, *vtvar;
11149 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11151 if (context_used) {
11152 if (handle_class == mono_defaults.typehandle_class) {
11153 ins = emit_get_rgctx_klass (cfg, context_used,
11154 mono_class_from_mono_type (handle),
11155 MONO_RGCTX_INFO_TYPE);
11156 } else if (handle_class == mono_defaults.methodhandle_class) {
11157 ins = emit_get_rgctx_method (cfg, context_used,
11158 handle, MONO_RGCTX_INFO_METHOD);
11159 } else if (handle_class == mono_defaults.fieldhandle_class) {
11160 ins = emit_get_rgctx_field (cfg, context_used,
11161 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11163 g_assert_not_reached ();
11165 } else if (cfg->compile_aot) {
11166 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11168 EMIT_NEW_PCONST (cfg, ins, handle);
11170 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11171 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11172 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11182 MONO_INST_NEW (cfg, ins, OP_THROW);
11184 ins->sreg1 = sp [0]->dreg;
11186 bblock->out_of_line = TRUE;
11187 MONO_ADD_INS (bblock, ins);
11188 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11189 MONO_ADD_INS (bblock, ins);
11192 link_bblock (cfg, bblock, end_bblock);
11193 start_new_bblock = 1;
11195 case CEE_ENDFINALLY:
11196 /* mono_save_seq_point_info () depends on this */
11197 if (sp != stack_start)
11198 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11199 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11200 MONO_ADD_INS (bblock, ins);
11202 start_new_bblock = 1;
11205 * Control will leave the method so empty the stack, otherwise
11206 * the next basic block will start with a nonempty stack.
11208 while (sp != stack_start) {
11213 case CEE_LEAVE_S: {
11216 if (*ip == CEE_LEAVE) {
11218 target = ip + 5 + (gint32)read32(ip + 1);
11221 target = ip + 2 + (signed char)(ip [1]);
11224 /* empty the stack */
11225 while (sp != stack_start) {
11230 * If this leave statement is in a catch block, check for a
11231 * pending exception, and rethrow it if necessary.
11232 * We avoid doing this in runtime invoke wrappers, since those are called
11233 * by native code which expects the wrapper to catch all exceptions.
11235 for (i = 0; i < header->num_clauses; ++i) {
11236 MonoExceptionClause *clause = &header->clauses [i];
11239 * Use <= in the final comparison to handle clauses with multiple
11240 * leave statements, like in bug #78024.
11241 * The ordering of the exception clauses guarantees that we find the
11242 * innermost clause.
11244 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11246 MonoBasicBlock *dont_throw;
11251 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11254 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11256 NEW_BBLOCK (cfg, dont_throw);
11259 * Currently, we always rethrow the abort exception, despite the
11260 * fact that this is not correct. See thread6.cs for an example.
11261 * But propagating the abort exception is more important than
11262 * getting the sematics right.
11264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11266 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11268 MONO_START_BB (cfg, dont_throw);
11273 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11275 MonoExceptionClause *clause;
11277 for (tmp = handlers; tmp; tmp = tmp->next) {
11278 clause = tmp->data;
11279 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11281 link_bblock (cfg, bblock, tblock);
11282 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11283 ins->inst_target_bb = tblock;
11284 ins->inst_eh_block = clause;
11285 MONO_ADD_INS (bblock, ins);
11286 bblock->has_call_handler = 1;
11287 if (COMPILE_LLVM (cfg)) {
11288 MonoBasicBlock *target_bb;
11291 * Link the finally bblock with the target, since it will
11292 * conceptually branch there.
11293 * FIXME: Have to link the bblock containing the endfinally.
11295 GET_BBLOCK (cfg, target_bb, target);
11296 link_bblock (cfg, tblock, target_bb);
11299 g_list_free (handlers);
11302 MONO_INST_NEW (cfg, ins, OP_BR);
11303 MONO_ADD_INS (bblock, ins);
11304 GET_BBLOCK (cfg, tblock, target);
11305 link_bblock (cfg, bblock, tblock);
11306 ins->inst_target_bb = tblock;
11307 start_new_bblock = 1;
11309 if (*ip == CEE_LEAVE)
11318 * Mono specific opcodes
11320 case MONO_CUSTOM_PREFIX: {
11322 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11326 case CEE_MONO_ICALL: {
11328 MonoJitICallInfo *info;
11330 token = read32 (ip + 2);
11331 func = mono_method_get_wrapper_data (method, token);
11332 info = mono_find_jit_icall_by_addr (func);
11334 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11337 CHECK_STACK (info->sig->param_count);
11338 sp -= info->sig->param_count;
11340 ins = mono_emit_jit_icall (cfg, info->func, sp);
11341 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11345 inline_costs += 10 * num_calls++;
11349 case CEE_MONO_LDPTR: {
11352 CHECK_STACK_OVF (1);
11354 token = read32 (ip + 2);
11356 ptr = mono_method_get_wrapper_data (method, token);
11357 /* FIXME: Generalize this */
11358 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11359 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11364 EMIT_NEW_PCONST (cfg, ins, ptr);
11367 inline_costs += 10 * num_calls++;
11368 /* Can't embed random pointers into AOT code */
11372 case CEE_MONO_JIT_ICALL_ADDR: {
11373 MonoJitICallInfo *callinfo;
11376 CHECK_STACK_OVF (1);
11378 token = read32 (ip + 2);
11380 ptr = mono_method_get_wrapper_data (method, token);
11381 callinfo = mono_find_jit_icall_by_addr (ptr);
11382 g_assert (callinfo);
11383 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11386 inline_costs += 10 * num_calls++;
11389 case CEE_MONO_ICALL_ADDR: {
11390 MonoMethod *cmethod;
11393 CHECK_STACK_OVF (1);
11395 token = read32 (ip + 2);
11397 cmethod = mono_method_get_wrapper_data (method, token);
11399 if (cfg->compile_aot) {
11400 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11402 ptr = mono_lookup_internal_call (cmethod);
11404 EMIT_NEW_PCONST (cfg, ins, ptr);
11410 case CEE_MONO_VTADDR: {
11411 MonoInst *src_var, *src;
11417 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11418 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11423 case CEE_MONO_NEWOBJ: {
11424 MonoInst *iargs [2];
11426 CHECK_STACK_OVF (1);
11428 token = read32 (ip + 2);
11429 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11430 mono_class_init (klass);
11431 NEW_DOMAINCONST (cfg, iargs [0]);
11432 MONO_ADD_INS (cfg->cbb, iargs [0]);
11433 NEW_CLASSCONST (cfg, iargs [1], klass);
11434 MONO_ADD_INS (cfg->cbb, iargs [1]);
11435 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11437 inline_costs += 10 * num_calls++;
11440 case CEE_MONO_OBJADDR:
11443 MONO_INST_NEW (cfg, ins, OP_MOVE);
11444 ins->dreg = alloc_ireg_mp (cfg);
11445 ins->sreg1 = sp [0]->dreg;
11446 ins->type = STACK_MP;
11447 MONO_ADD_INS (cfg->cbb, ins);
11451 case CEE_MONO_LDNATIVEOBJ:
11453 * Similar to LDOBJ, but instead load the unmanaged
11454 * representation of the vtype to the stack.
11459 token = read32 (ip + 2);
11460 klass = mono_method_get_wrapper_data (method, token);
11461 g_assert (klass->valuetype);
11462 mono_class_init (klass);
11465 MonoInst *src, *dest, *temp;
11468 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11469 temp->backend.is_pinvoke = 1;
11470 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11471 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11473 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11474 dest->type = STACK_VTYPE;
11475 dest->klass = klass;
11481 case CEE_MONO_RETOBJ: {
11483 * Same as RET, but return the native representation of a vtype
11486 g_assert (cfg->ret);
11487 g_assert (mono_method_signature (method)->pinvoke);
11492 token = read32 (ip + 2);
11493 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11495 if (!cfg->vret_addr) {
11496 g_assert (cfg->ret_var_is_local);
11498 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11500 EMIT_NEW_RETLOADA (cfg, ins);
11502 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11504 if (sp != stack_start)
11507 MONO_INST_NEW (cfg, ins, OP_BR);
11508 ins->inst_target_bb = end_bblock;
11509 MONO_ADD_INS (bblock, ins);
11510 link_bblock (cfg, bblock, end_bblock);
11511 start_new_bblock = 1;
11515 case CEE_MONO_CISINST:
11516 case CEE_MONO_CCASTCLASS: {
11521 token = read32 (ip + 2);
11522 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11523 if (ip [1] == CEE_MONO_CISINST)
11524 ins = handle_cisinst (cfg, klass, sp [0]);
11526 ins = handle_ccastclass (cfg, klass, sp [0]);
11532 case CEE_MONO_SAVE_LMF:
11533 case CEE_MONO_RESTORE_LMF:
11534 #ifdef MONO_ARCH_HAVE_LMF_OPS
11535 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11536 MONO_ADD_INS (bblock, ins);
11537 cfg->need_lmf_area = TRUE;
11541 case CEE_MONO_CLASSCONST:
11542 CHECK_STACK_OVF (1);
11544 token = read32 (ip + 2);
11545 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11548 inline_costs += 10 * num_calls++;
11550 case CEE_MONO_NOT_TAKEN:
11551 bblock->out_of_line = TRUE;
11554 case CEE_MONO_TLS: {
11557 CHECK_STACK_OVF (1);
11559 key = (gint32)read32 (ip + 2);
11560 g_assert (key < TLS_KEY_NUM);
11562 ins = mono_create_tls_get (cfg, key);
11564 if (cfg->compile_aot) {
11566 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11567 ins->dreg = alloc_preg (cfg);
11568 ins->type = STACK_PTR;
11570 g_assert_not_reached ();
11573 ins->type = STACK_PTR;
11574 MONO_ADD_INS (bblock, ins);
11579 case CEE_MONO_DYN_CALL: {
11580 MonoCallInst *call;
11582 /* It would be easier to call a trampoline, but that would put an
11583 * extra frame on the stack, confusing exception handling. So
11584 * implement it inline using an opcode for now.
11587 if (!cfg->dyn_call_var) {
11588 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11589 /* prevent it from being register allocated */
11590 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11593 /* Has to use a call inst since it local regalloc expects it */
11594 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11595 ins = (MonoInst*)call;
11597 ins->sreg1 = sp [0]->dreg;
11598 ins->sreg2 = sp [1]->dreg;
11599 MONO_ADD_INS (bblock, ins);
11601 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11604 inline_costs += 10 * num_calls++;
11608 case CEE_MONO_MEMORY_BARRIER: {
11610 emit_memory_barrier (cfg, (int)read32 (ip + 2));
11614 case CEE_MONO_JIT_ATTACH: {
11615 MonoInst *args [16], *domain_ins;
11616 MonoInst *ad_ins, *jit_tls_ins;
11617 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11619 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11621 EMIT_NEW_PCONST (cfg, ins, NULL);
11622 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11624 ad_ins = mono_get_domain_intrinsic (cfg);
11625 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
11627 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
11628 NEW_BBLOCK (cfg, next_bb);
11629 NEW_BBLOCK (cfg, call_bb);
11631 if (cfg->compile_aot) {
11632 /* AOT code is only used in the root domain */
11633 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11635 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11637 MONO_ADD_INS (cfg->cbb, ad_ins);
11638 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11639 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11641 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
11642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11645 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11646 MONO_START_BB (cfg, call_bb);
11649 if (cfg->compile_aot) {
11650 /* AOT code is only used in the root domain */
11651 EMIT_NEW_PCONST (cfg, args [0], NULL);
11653 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11655 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11656 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11659 MONO_START_BB (cfg, next_bb);
11665 case CEE_MONO_JIT_DETACH: {
11666 MonoInst *args [16];
11668 /* Restore the original domain */
11669 dreg = alloc_ireg (cfg);
11670 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11671 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11676 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11682 case CEE_PREFIX1: {
11685 case CEE_ARGLIST: {
11686 /* somewhat similar to LDTOKEN */
11687 MonoInst *addr, *vtvar;
11688 CHECK_STACK_OVF (1);
11689 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11691 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11692 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11694 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11695 ins->type = STACK_VTYPE;
11696 ins->klass = mono_defaults.argumenthandle_class;
11709 * The following transforms:
11710 * CEE_CEQ into OP_CEQ
11711 * CEE_CGT into OP_CGT
11712 * CEE_CGT_UN into OP_CGT_UN
11713 * CEE_CLT into OP_CLT
11714 * CEE_CLT_UN into OP_CLT_UN
11716 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11718 MONO_INST_NEW (cfg, ins, cmp->opcode);
11720 cmp->sreg1 = sp [0]->dreg;
11721 cmp->sreg2 = sp [1]->dreg;
11722 type_from_op (cmp, sp [0], sp [1]);
11724 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11725 cmp->opcode = OP_LCOMPARE;
11726 else if (sp [0]->type == STACK_R8)
11727 cmp->opcode = OP_FCOMPARE;
11729 cmp->opcode = OP_ICOMPARE;
11730 MONO_ADD_INS (bblock, cmp);
11731 ins->type = STACK_I4;
11732 ins->dreg = alloc_dreg (cfg, ins->type);
11733 type_from_op (ins, sp [0], sp [1]);
11735 if (cmp->opcode == OP_FCOMPARE) {
11737 * The backends expect the fceq opcodes to do the
11740 ins->sreg1 = cmp->sreg1;
11741 ins->sreg2 = cmp->sreg2;
11744 MONO_ADD_INS (bblock, ins);
11750 MonoInst *argconst;
11751 MonoMethod *cil_method;
11753 CHECK_STACK_OVF (1);
11755 n = read32 (ip + 2);
11756 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11757 if (!cmethod || mono_loader_get_last_error ())
11759 mono_class_init (cmethod->klass);
11761 mono_save_token_info (cfg, image, n, cmethod);
11763 context_used = mini_method_check_context_used (cfg, cmethod);
11765 cil_method = cmethod;
11766 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11767 METHOD_ACCESS_FAILURE (method, cil_method);
11769 if (mono_security_cas_enabled ()) {
11770 if (check_linkdemand (cfg, method, cmethod))
11771 INLINE_FAILURE ("linkdemand");
11772 CHECK_CFG_EXCEPTION;
11773 } else if (mono_security_core_clr_enabled ()) {
11774 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11778 * Optimize the common case of ldftn+delegate creation
11780 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11781 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11782 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11783 MonoInst *target_ins, *handle_ins;
11784 MonoMethod *invoke;
11785 int invoke_context_used;
11787 invoke = mono_get_delegate_invoke (ctor_method->klass);
11788 if (!invoke || !mono_method_signature (invoke))
11791 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11793 target_ins = sp [-1];
11795 if (mono_security_core_clr_enabled ())
11796 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11798 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11799 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11800 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11802 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11806 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11807 /* FIXME: SGEN support */
11808 if (invoke_context_used == 0) {
11810 if (cfg->verbose_level > 3)
11811 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11812 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
11815 CHECK_CFG_EXCEPTION;
11826 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11827 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11831 inline_costs += 10 * num_calls++;
11834 case CEE_LDVIRTFTN: {
11835 MonoInst *args [2];
11839 n = read32 (ip + 2);
11840 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11841 if (!cmethod || mono_loader_get_last_error ())
11843 mono_class_init (cmethod->klass);
11845 context_used = mini_method_check_context_used (cfg, cmethod);
11847 if (mono_security_cas_enabled ()) {
11848 if (check_linkdemand (cfg, method, cmethod))
11849 INLINE_FAILURE ("linkdemand");
11850 CHECK_CFG_EXCEPTION;
11851 } else if (mono_security_core_clr_enabled ()) {
11852 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11856 * Optimize the common case of ldvirtftn+delegate creation
11858 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
11859 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11860 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11861 MonoInst *target_ins, *handle_ins;
11862 MonoMethod *invoke;
11863 int invoke_context_used;
11865 invoke = mono_get_delegate_invoke (ctor_method->klass);
11866 if (!invoke || !mono_method_signature (invoke))
11869 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11871 target_ins = sp [-1];
11873 if (mono_security_core_clr_enabled ())
11874 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11876 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11877 /* FIXME: SGEN support */
11878 if (invoke_context_used == 0) {
11880 if (cfg->verbose_level > 3)
11881 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11882 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
11885 CHECK_CFG_EXCEPTION;
11899 args [1] = emit_get_rgctx_method (cfg, context_used,
11900 cmethod, MONO_RGCTX_INFO_METHOD);
11903 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11905 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11908 inline_costs += 10 * num_calls++;
11912 CHECK_STACK_OVF (1);
11914 n = read16 (ip + 2);
11916 EMIT_NEW_ARGLOAD (cfg, ins, n);
11921 CHECK_STACK_OVF (1);
11923 n = read16 (ip + 2);
11925 NEW_ARGLOADA (cfg, ins, n);
11926 MONO_ADD_INS (cfg->cbb, ins);
11934 n = read16 (ip + 2);
11936 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11938 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11942 CHECK_STACK_OVF (1);
11944 n = read16 (ip + 2);
11946 EMIT_NEW_LOCLOAD (cfg, ins, n);
11951 unsigned char *tmp_ip;
11952 CHECK_STACK_OVF (1);
11954 n = read16 (ip + 2);
11957 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11963 EMIT_NEW_LOCLOADA (cfg, ins, n);
11972 n = read16 (ip + 2);
11974 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11976 emit_stloc_ir (cfg, sp, header, n);
11983 if (sp != stack_start)
11985 if (cfg->method != method)
11987 * Inlining this into a loop in a parent could lead to
11988 * stack overflows which is different behavior than the
11989 * non-inlined case, thus disable inlining in this case.
11991 INLINE_FAILURE("localloc");
11993 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11994 ins->dreg = alloc_preg (cfg);
11995 ins->sreg1 = sp [0]->dreg;
11996 ins->type = STACK_PTR;
11997 MONO_ADD_INS (cfg->cbb, ins);
11999 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12001 ins->flags |= MONO_INST_INIT;
12006 case CEE_ENDFILTER: {
12007 MonoExceptionClause *clause, *nearest;
12008 int cc, nearest_num;
12012 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12014 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12015 ins->sreg1 = (*sp)->dreg;
12016 MONO_ADD_INS (bblock, ins);
12017 start_new_bblock = 1;
12022 for (cc = 0; cc < header->num_clauses; ++cc) {
12023 clause = &header->clauses [cc];
12024 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12025 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12026 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12031 g_assert (nearest);
12032 if ((ip - header->code) != nearest->handler_offset)
12037 case CEE_UNALIGNED_:
12038 ins_flag |= MONO_INST_UNALIGNED;
12039 /* FIXME: record alignment? we can assume 1 for now */
12043 case CEE_VOLATILE_:
12044 ins_flag |= MONO_INST_VOLATILE;
12048 ins_flag |= MONO_INST_TAILCALL;
12049 cfg->flags |= MONO_CFG_HAS_TAIL;
12050 /* Can't inline tail calls at this time */
12051 inline_costs += 100000;
12058 token = read32 (ip + 2);
12059 klass = mini_get_class (method, token, generic_context);
12060 CHECK_TYPELOAD (klass);
12061 if (generic_class_is_reference_type (cfg, klass))
12062 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12064 mini_emit_initobj (cfg, *sp, NULL, klass);
12068 case CEE_CONSTRAINED_:
12070 token = read32 (ip + 2);
12071 constrained_call = mini_get_class (method, token, generic_context);
12072 CHECK_TYPELOAD (constrained_call);
12076 case CEE_INITBLK: {
12077 MonoInst *iargs [3];
12081 /* Skip optimized paths for volatile operations. */
12082 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12083 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12084 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12085 /* emit_memset only works when val == 0 */
12086 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12089 iargs [0] = sp [0];
12090 iargs [1] = sp [1];
12091 iargs [2] = sp [2];
12092 if (ip [1] == CEE_CPBLK) {
12094 * FIXME: It's unclear whether we should be emitting both the acquire
12095 * and release barriers for cpblk. It is technically both a load and
12096 * store operation, so it seems like that's the sensible thing to do.
12098 * FIXME: We emit full barriers on both sides of the operation for
12099 * simplicity. We should have a separate atomic memcpy method instead.
12101 MonoMethod *memcpy_method = get_memcpy_method ();
12103 if (ins_flag & MONO_INST_VOLATILE)
12104 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12106 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12107 call->flags |= ins_flag;
12109 if (ins_flag & MONO_INST_VOLATILE)
12110 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12112 MonoMethod *memset_method = get_memset_method ();
12113 if (ins_flag & MONO_INST_VOLATILE) {
12114 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12115 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12117 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12118 call->flags |= ins_flag;
12129 ins_flag |= MONO_INST_NOTYPECHECK;
12131 ins_flag |= MONO_INST_NORANGECHECK;
12132 /* we ignore the no-nullcheck for now since we
12133 * really do it explicitly only when doing callvirt->call
12137 case CEE_RETHROW: {
12139 int handler_offset = -1;
12141 for (i = 0; i < header->num_clauses; ++i) {
12142 MonoExceptionClause *clause = &header->clauses [i];
12143 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12144 handler_offset = clause->handler_offset;
12149 bblock->flags |= BB_EXCEPTION_UNSAFE;
12151 if (handler_offset == -1)
12154 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12155 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12156 ins->sreg1 = load->dreg;
12157 MONO_ADD_INS (bblock, ins);
12159 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12160 MONO_ADD_INS (bblock, ins);
12163 link_bblock (cfg, bblock, end_bblock);
12164 start_new_bblock = 1;
12172 CHECK_STACK_OVF (1);
12174 token = read32 (ip + 2);
12175 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12176 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12179 val = mono_type_size (type, &ialign);
12181 MonoClass *klass = mini_get_class (method, token, generic_context);
12182 CHECK_TYPELOAD (klass);
12184 val = mono_type_size (&klass->byval_arg, &ialign);
12186 if (mini_is_gsharedvt_klass (cfg, klass))
12187 GSHAREDVT_FAILURE (*ip);
12189 EMIT_NEW_ICONST (cfg, ins, val);
12194 case CEE_REFANYTYPE: {
12195 MonoInst *src_var, *src;
12197 GSHAREDVT_FAILURE (*ip);
12203 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12205 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12206 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12207 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12212 case CEE_READONLY_:
12225 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12235 g_warning ("opcode 0x%02x not handled", *ip);
12239 if (start_new_bblock != 1)
12242 bblock->cil_length = ip - bblock->cil_code;
12243 if (bblock->next_bb) {
12244 /* This could already be set because of inlining, #693905 */
12245 MonoBasicBlock *bb = bblock;
12247 while (bb->next_bb)
12249 bb->next_bb = end_bblock;
12251 bblock->next_bb = end_bblock;
12254 if (cfg->method == method && cfg->domainvar) {
12256 MonoInst *get_domain;
12258 cfg->cbb = init_localsbb;
12260 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12261 MONO_ADD_INS (cfg->cbb, get_domain);
12263 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12265 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12266 MONO_ADD_INS (cfg->cbb, store);
12269 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12270 if (cfg->compile_aot)
12271 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12272 mono_get_got_var (cfg);
12275 if (cfg->method == method && cfg->got_var)
12276 mono_emit_load_got_addr (cfg);
12278 if (init_localsbb) {
12279 cfg->cbb = init_localsbb;
12281 for (i = 0; i < header->num_locals; ++i) {
12282 emit_init_local (cfg, i, header->locals [i], init_locals);
12286 if (cfg->init_ref_vars && cfg->method == method) {
12287 /* Emit initialization for ref vars */
12288 // FIXME: Avoid duplication initialization for IL locals.
12289 for (i = 0; i < cfg->num_varinfo; ++i) {
12290 MonoInst *ins = cfg->varinfo [i];
12292 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12293 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12297 if (cfg->lmf_var && cfg->method == method) {
12298 cfg->cbb = init_localsbb;
12299 emit_push_lmf (cfg);
12302 cfg->cbb = init_localsbb;
12303 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12306 MonoBasicBlock *bb;
12309 * Make seq points at backward branch targets interruptable.
12311 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12312 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12313 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12316 /* Add a sequence point for method entry/exit events */
12317 if (cfg->gen_seq_points_debug_data) {
12318 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12319 MONO_ADD_INS (init_localsbb, ins);
12320 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12321 MONO_ADD_INS (cfg->bb_exit, ins);
12325 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12326 * the code they refer to was dead (#11880).
12328 if (sym_seq_points) {
12329 for (i = 0; i < header->code_size; ++i) {
12330 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12333 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12334 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12341 if (cfg->method == method) {
12342 MonoBasicBlock *bb;
12343 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12344 bb->region = mono_find_block_region (cfg, bb->real_offset);
12346 mono_create_spvar_for_region (cfg, bb->region);
12347 if (cfg->verbose_level > 2)
12348 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12352 if (inline_costs < 0) {
12355 /* Method is too large */
12356 mname = mono_method_full_name (method, TRUE);
12357 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12358 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12362 if ((cfg->verbose_level > 2) && (cfg->method == method))
12363 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12368 g_assert (!mono_error_ok (&cfg->error));
12372 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12376 set_exception_type_from_invalid_il (cfg, method, ip);
12380 g_slist_free (class_inits);
12381 mono_basic_block_free (original_bb);
12382 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12383 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12384 if (cfg->exception_type)
12387 return inline_costs;
12391 store_membase_reg_to_store_membase_imm (int opcode)
12394 case OP_STORE_MEMBASE_REG:
12395 return OP_STORE_MEMBASE_IMM;
12396 case OP_STOREI1_MEMBASE_REG:
12397 return OP_STOREI1_MEMBASE_IMM;
12398 case OP_STOREI2_MEMBASE_REG:
12399 return OP_STOREI2_MEMBASE_IMM;
12400 case OP_STOREI4_MEMBASE_REG:
12401 return OP_STOREI4_MEMBASE_IMM;
12402 case OP_STOREI8_MEMBASE_REG:
12403 return OP_STOREI8_MEMBASE_IMM;
12405 g_assert_not_reached ();
12412 mono_op_to_op_imm (int opcode)
12416 return OP_IADD_IMM;
12418 return OP_ISUB_IMM;
12420 return OP_IDIV_IMM;
12422 return OP_IDIV_UN_IMM;
12424 return OP_IREM_IMM;
12426 return OP_IREM_UN_IMM;
12428 return OP_IMUL_IMM;
12430 return OP_IAND_IMM;
12434 return OP_IXOR_IMM;
12436 return OP_ISHL_IMM;
12438 return OP_ISHR_IMM;
12440 return OP_ISHR_UN_IMM;
12443 return OP_LADD_IMM;
12445 return OP_LSUB_IMM;
12447 return OP_LAND_IMM;
12451 return OP_LXOR_IMM;
12453 return OP_LSHL_IMM;
12455 return OP_LSHR_IMM;
12457 return OP_LSHR_UN_IMM;
12458 #if SIZEOF_REGISTER == 8
12460 return OP_LREM_IMM;
12464 return OP_COMPARE_IMM;
12466 return OP_ICOMPARE_IMM;
12468 return OP_LCOMPARE_IMM;
12470 case OP_STORE_MEMBASE_REG:
12471 return OP_STORE_MEMBASE_IMM;
12472 case OP_STOREI1_MEMBASE_REG:
12473 return OP_STOREI1_MEMBASE_IMM;
12474 case OP_STOREI2_MEMBASE_REG:
12475 return OP_STOREI2_MEMBASE_IMM;
12476 case OP_STOREI4_MEMBASE_REG:
12477 return OP_STOREI4_MEMBASE_IMM;
12479 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12481 return OP_X86_PUSH_IMM;
12482 case OP_X86_COMPARE_MEMBASE_REG:
12483 return OP_X86_COMPARE_MEMBASE_IMM;
12485 #if defined(TARGET_AMD64)
12486 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12487 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12489 case OP_VOIDCALL_REG:
12490 return OP_VOIDCALL;
12498 return OP_LOCALLOC_IMM;
12505 ldind_to_load_membase (int opcode)
12509 return OP_LOADI1_MEMBASE;
12511 return OP_LOADU1_MEMBASE;
12513 return OP_LOADI2_MEMBASE;
12515 return OP_LOADU2_MEMBASE;
12517 return OP_LOADI4_MEMBASE;
12519 return OP_LOADU4_MEMBASE;
12521 return OP_LOAD_MEMBASE;
12522 case CEE_LDIND_REF:
12523 return OP_LOAD_MEMBASE;
12525 return OP_LOADI8_MEMBASE;
12527 return OP_LOADR4_MEMBASE;
12529 return OP_LOADR8_MEMBASE;
12531 g_assert_not_reached ();
12538 stind_to_store_membase (int opcode)
12542 return OP_STOREI1_MEMBASE_REG;
12544 return OP_STOREI2_MEMBASE_REG;
12546 return OP_STOREI4_MEMBASE_REG;
12548 case CEE_STIND_REF:
12549 return OP_STORE_MEMBASE_REG;
12551 return OP_STOREI8_MEMBASE_REG;
12553 return OP_STORER4_MEMBASE_REG;
12555 return OP_STORER8_MEMBASE_REG;
12557 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE opcode to the corresponding *_MEM variant which
 * loads from an absolute address instead of base+offset. Only the x86/amd64
 * backends implement the _MEM opcodes; on other targets, and for opcodes
 * with no _MEM form, return -1.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored with
 * STORE_OPCODE, return a combined read-modify-write opcode which operates
 * directly on the memory destination (x86/amd64 only), or -1 if no such
 * combination exists. OP_MOVE folds away entirely (OP_NOP) since the store
 * already copies the value.
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only 32 bit / pointer sized stores can be fused on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	/* 32 bit, 64 bit and pointer sized stores can be fused on amd64 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Given a compare-result opcode whose boolean result is immediately
 * stored with STORE_OPCODE, return a combined setcc-to-memory opcode
 * (x86/amd64 only). Only byte-sized stores can be fused; return -1
 * otherwise.
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* fallthrough: the next test fails too, so we reach return -1 */
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Return the arch opcode which fuses a load (LOAD_OPCODE) of OPCODE's first
 * source operand into the instruction itself, or -1 when no fusion exists.
 * NOTE(review): surrounding case labels / #endif's are elided in this chunk.
 */
12703 op_to_op_src1_membase (int load_opcode, int opcode)
12706 /* FIXME: This has sign extension issues */
12708 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12709 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Only pointer-sized / int32 loads can be fused otherwise */
12712 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12717 return OP_X86_PUSH_MEMBASE;
12718 case OP_COMPARE_IMM:
12719 case OP_ICOMPARE_IMM:
12720 return OP_X86_COMPARE_MEMBASE_IMM;
12723 return OP_X86_COMPARE_MEMBASE_REG;
12727 #ifdef TARGET_AMD64
12728 /* FIXME: This has sign extension issues */
12730 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12731 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) pointers are 32 bit, hence the separate load checks */
12736 #ifdef __mono_ilp32__
12737 if (load_opcode == OP_LOADI8_MEMBASE)
12739 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12741 return OP_X86_PUSH_MEMBASE;
12743 /* FIXME: This only works for 32 bit immediates
12744 case OP_COMPARE_IMM:
12745 case OP_LCOMPARE_IMM:
12746 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12747 return OP_AMD64_COMPARE_MEMBASE_IMM;
12749 case OP_ICOMPARE_IMM:
12750 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12751 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12755 #ifdef __mono_ilp32__
12756 if (load_opcode == OP_LOAD_MEMBASE)
12757 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12758 if (load_opcode == OP_LOADI8_MEMBASE)
12760 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12762 return OP_AMD64_COMPARE_MEMBASE_REG;
12765 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12766 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Return the arch opcode which fuses a load (LOAD_OPCODE) of OPCODE's second
 * source operand into the instruction itself, or -1 when no fusion exists.
 * NOTE(review): surrounding case labels / #else / #endif lines are elided.
 */
12775 op_to_op_src2_membase (int load_opcode, int opcode)
12778 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12784 return OP_X86_COMPARE_REG_MEMBASE;
12786 return OP_X86_ADD_REG_MEMBASE;
12788 return OP_X86_SUB_REG_MEMBASE;
12790 return OP_X86_AND_REG_MEMBASE;
12792 return OP_X86_OR_REG_MEMBASE;
12794 return OP_X86_XOR_REG_MEMBASE;
12798 #ifdef TARGET_AMD64
/* Under ILP32, OP_LOAD_MEMBASE is a 32 bit load and joins the I4 group */
12799 #ifdef __mono_ilp32__
12800 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12802 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit operations reuse the OP_X86_ opcodes */
12806 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12808 return OP_X86_ADD_REG_MEMBASE;
12810 return OP_X86_SUB_REG_MEMBASE;
12812 return OP_X86_AND_REG_MEMBASE;
12814 return OP_X86_OR_REG_MEMBASE;
12816 return OP_X86_XOR_REG_MEMBASE;
12818 #ifdef __mono_ilp32__
12819 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12821 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit operations use the OP_AMD64_ opcodes */
12826 return OP_AMD64_COMPARE_REG_MEMBASE;
12828 return OP_AMD64_ADD_REG_MEMBASE;
12830 return OP_AMD64_SUB_REG_MEMBASE;
12832 return OP_AMD64_AND_REG_MEMBASE;
12834 return OP_AMD64_OR_REG_MEMBASE;
12836 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but special-cases opcodes which the current
 * architecture emulates in software (long shifts on 32 bit, mul/div) so the
 * emulation is not bypassed — presumably returning -1 for those; the emulated
 * cases themselves are elided from this chunk (TODO confirm against the
 * missing lines).
 */
12845 mono_op_to_op_imm_noemul (int opcode)
12848 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12854 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12861 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12866 return mono_op_to_op_imm (opcode);
12871 * mono_handle_global_vregs:
12873 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12877 mono_handle_global_vregs (MonoCompile *cfg)
12879 gint32 *vreg_to_bb;
12880 MonoBasicBlock *bb;
/*
 * One gint32 slot per vreg: 0 = unseen, block_num + 1 = seen in exactly one
 * bb, -1 = used in more than one bb (global).
 * Fixed: allocate gint32 elements (not pointer-sized ones) and parenthesize
 * the + 1 so it adds one element instead of one byte.
 */
12883 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
12885 #ifdef MONO_ARCH_SIMD_INTRINSICS
12886 if (cfg->uses_simd_intrinsics)
12887 mono_simd_simplify_indirection (cfg);
12890 /* Find local vregs used in more than one bb */
12891 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12892 MonoInst *ins = bb->code;
12893 int block_num = bb->block_num;
12895 if (cfg->verbose_level > 2)
12896 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12899 for (; ins; ins = ins->next) {
12900 const char *spec = INS_INFO (ins->opcode);
12901 int regtype = 0, regindex;
12904 if (G_UNLIKELY (cfg->verbose_level > 2))
12905 mono_print_ins (ins);
12907 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2 and sreg3 of this instruction in turn */
12909 for (regindex = 0; regindex < 4; regindex ++) {
12912 if (regindex == 0) {
12913 regtype = spec [MONO_INST_DEST];
12914 if (regtype == ' ')
12917 } else if (regindex == 1) {
12918 regtype = spec [MONO_INST_SRC1];
12919 if (regtype == ' ')
12922 } else if (regindex == 2) {
12923 regtype = spec [MONO_INST_SRC2];
12924 if (regtype == ' ')
12927 } else if (regindex == 3) {
12928 regtype = spec [MONO_INST_SRC3];
12929 if (regtype == ' ')
12934 #if SIZEOF_REGISTER == 4
12935 /* In the LLVM case, the long opcodes are not decomposed */
12936 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12938 * Since some instructions reference the original long vreg,
12939 * and some reference the two component vregs, it is quite hard
12940 * to determine when it needs to be global. So be conservative.
12942 if (!get_vreg_to_inst (cfg, vreg)) {
12943 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12945 if (cfg->verbose_level > 2)
12946 printf ("LONG VREG R%d made global.\n", vreg);
12950 * Make the component vregs volatile since the optimizations can
12951 * get confused otherwise.
12953 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12954 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12958 g_assert (vreg != -1);
12960 prev_bb = vreg_to_bb [vreg];
12961 if (prev_bb == 0) {
12962 /* 0 is a valid block num */
12963 vreg_to_bb [vreg] = block_num + 1;
12964 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers never need a variable */
12965 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12968 if (!get_vreg_to_inst (cfg, vreg)) {
12969 if (G_UNLIKELY (cfg->verbose_level > 2))
12970 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable whose type matches the register class */
12974 if (vreg_is_ref (cfg, vreg))
12975 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12977 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12980 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12983 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12986 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12989 g_assert_not_reached ();
12993 /* Flag as having been used in more than one bb */
12994 vreg_to_bb [vreg] = -1;
13000 /* If a variable is used in only one bblock, convert it into a local vreg */
13001 for (i = 0; i < cfg->num_varinfo; i++) {
13002 MonoInst *var = cfg->varinfo [i];
13003 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13005 switch (var->type) {
13011 #if SIZEOF_REGISTER == 8
13014 #if !defined(TARGET_X86)
13015 /* Enabling this screws up the fp stack on x86 */
13018 if (mono_arch_is_soft_float ())
13021 /* Arguments are implicitly global */
13022 /* Putting R4 vars into registers doesn't work currently */
13023 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13024 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13026 * Make sure that the variable's liveness interval doesn't contain a call, since
13027 * that would cause the lvreg to be spilled, making the whole optimization
13030 /* This is too slow for JIT compilation */
13032 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13034 int def_index, call_index, ins_index;
13035 gboolean spilled = FALSE;
13040 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13041 const char *spec = INS_INFO (ins->opcode);
13043 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13044 def_index = ins_index;
/*
 * Fixed: the second disjunct duplicated the SRC1/sreg1 test; it must check
 * SRC2/sreg2, otherwise uses through the second source register are missed.
 */
13046 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13047 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
13048 if (call_index > def_index) {
13054 if (MONO_IS_CALL (ins))
13055 call_index = ins_index;
13065 if (G_UNLIKELY (cfg->verbose_level > 2))
13066 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop its vreg mapping */
13067 var->flags |= MONO_INST_IS_DEAD;
13068 cfg->vreg_to_inst [var->dreg] = NULL;
13075 * Compress the varinfo and vars tables so the liveness computation is faster and
13076 * takes up less space.
13079 for (i = 0; i < cfg->num_varinfo; ++i) {
13080 MonoInst *var = cfg->varinfo [i];
13081 if (pos < i && cfg->locals_start == i)
13082 cfg->locals_start = pos;
13083 if (!(var->flags & MONO_INST_IS_DEAD)) {
13085 cfg->varinfo [pos] = cfg->varinfo [i];
13086 cfg->varinfo [pos]->inst_c0 = pos;
13087 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13088 cfg->vars [pos].idx = pos;
13089 #if SIZEOF_REGISTER == 4
13090 if (cfg->varinfo [pos]->type == STACK_I8) {
13091 /* Modify the two component vars too */
13094 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13095 var1->inst_c0 = pos;
13096 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13097 var1->inst_c0 = pos;
13104 cfg->num_varinfo = pos;
13105 if (cfg->locals_start > cfg->num_varinfo)
13106 cfg->locals_start = cfg->num_varinfo;
13110 * mono_spill_global_vars:
13112 * Generate spill code for variables which are not allocated to registers,
13113 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13114 * code is generated which could be optimized by the local optimization passes.
13117 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13119 MonoBasicBlock *bb;
13121 int orig_next_vreg;
13122 guint32 *vreg_to_lvreg;
13124 guint32 i, lvregs_len;
13125 gboolean dest_has_lvreg = FALSE;
13126 guint32 stacktypes [128];
13127 MonoInst **live_range_start, **live_range_end;
13128 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13129 int *gsharedvt_vreg_to_idx = NULL;
13131 *need_local_opts = FALSE;
13133 memset (spec2, 0, sizeof (spec2));
/* Map INS_INFO register-class characters to stack types */
13135 /* FIXME: Move this function to mini.c */
13136 stacktypes ['i'] = STACK_PTR;
13137 stacktypes ['l'] = STACK_I8;
13138 stacktypes ['f'] = STACK_R8;
13139 #ifdef MONO_ARCH_SIMD_INTRINSICS
13140 stacktypes ['x'] = STACK_VTYPE;
13143 #if SIZEOF_REGISTER == 4
/* On 32 bit, give each long variable's two component vregs (dreg+1 = low
 * word, dreg+2 = high word) their own REGOFFSET MonoInsts. */
13144 /* Create MonoInsts for longs */
13145 for (i = 0; i < cfg->num_varinfo; i++) {
13146 MonoInst *ins = cfg->varinfo [i];
13148 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13149 switch (ins->type) {
13154 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13157 g_assert (ins->opcode == OP_REGOFFSET);
13159 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13161 tree->opcode = OP_REGOFFSET;
13162 tree->inst_basereg = ins->inst_basereg;
13163 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13165 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13167 tree->opcode = OP_REGOFFSET;
13168 tree->inst_basereg = ins->inst_basereg;
13169 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13179 if (cfg->compute_gc_maps) {
13180 /* registers need liveness info even for !non refs */
13181 for (i = 0; i < cfg->num_varinfo; i++) {
13182 MonoInst *ins = cfg->varinfo [i];
13184 if (ins->opcode == OP_REGVAR)
13185 ins->flags |= MONO_INST_GC_TRACK;
/* For gsharedvt methods, record per-vreg info: idx + 1 for gsharedvt locals,
 * -1 for gsharedvt arguments passed by ref, 0 otherwise. */
13189 if (cfg->gsharedvt) {
13190 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13192 for (i = 0; i < cfg->num_varinfo; ++i) {
13193 MonoInst *ins = cfg->varinfo [i];
13196 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13197 if (i >= cfg->locals_start) {
13199 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13200 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13201 ins->opcode = OP_GSHAREDVT_LOCAL;
13202 ins->inst_imm = idx;
13205 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13206 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13212 /* FIXME: widening and truncation */
13215 * As an optimization, when a variable allocated to the stack is first loaded into
13216 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13217 * the variable again.
13219 orig_next_vreg = cfg->next_vreg;
13220 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity cache of cached vregs; guarded below by g_assert (lvregs_len < 1024) */
13221 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13225 * These arrays contain the first and last instructions accessing a given
13227 * Since we emit bblocks in the same order we process them here, and we
13228 * don't split live ranges, these will precisely describe the live range of
13229 * the variable, i.e. the instruction range where a valid value can be found
13230 * in the variables location.
13231 * The live range is computed using the liveness info computed by the liveness pass.
13232 * We can't use vmv->range, since that is an abstract live range, and we need
13233 * one which is instruction precise.
13234 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13236 /* FIXME: Only do this if debugging info is requested */
13237 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13238 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13239 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13240 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13242 /* Add spill loads/stores */
13243 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13246 if (cfg->verbose_level > 2)
13247 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is only valid within one bblock */
13249 /* Clear vreg_to_lvreg array */
13250 for (i = 0; i < lvregs_len; i++)
13251 vreg_to_lvreg [lvregs [i]] = 0;
13255 MONO_BB_FOR_EACH_INS (bb, ins) {
13256 const char *spec = INS_INFO (ins->opcode);
13257 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13258 gboolean store, no_lvreg;
13259 int sregs [MONO_MAX_SRC_REGS];
13261 if (G_UNLIKELY (cfg->verbose_level > 2))
13262 mono_print_ins (ins);
13264 if (ins->opcode == OP_NOP)
13268 * We handle LDADDR here as well, since it can only be decomposed
13269 * when variable addresses are known.
13271 if (ins->opcode == OP_LDADDR) {
13272 MonoInst *var = ins->inst_p0;
13274 if (var->opcode == OP_VTARG_ADDR) {
13275 /* Happens on SPARC/S390 where vtypes are passed by reference */
13276 MonoInst *vtaddr = var->inst_left;
13277 if (vtaddr->opcode == OP_REGVAR) {
13278 ins->opcode = OP_MOVE;
13279 ins->sreg1 = vtaddr->dreg;
13281 else if (var->inst_left->opcode == OP_REGOFFSET) {
13282 ins->opcode = OP_LOAD_MEMBASE;
13283 ins->inst_basereg = vtaddr->inst_basereg;
13284 ins->inst_offset = vtaddr->inst_offset;
13287 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13288 /* gsharedvt arg passed by ref */
13289 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13291 ins->opcode = OP_LOAD_MEMBASE;
13292 ins->inst_basereg = var->inst_basereg;
13293 ins->inst_offset = var->inst_offset;
13294 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13295 MonoInst *load, *load2, *load3;
13296 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13297 int reg1, reg2, reg3;
13298 MonoInst *info_var = cfg->gsharedvt_info_var;
13299 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13303 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13306 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13308 g_assert (info_var);
13309 g_assert (locals_var);
13311 /* Mark the instruction used to compute the locals var as used */
13312 cfg->gsharedvt_locals_var_ins = NULL;
13314 /* Load the offset */
13315 if (info_var->opcode == OP_REGOFFSET) {
13316 reg1 = alloc_ireg (cfg);
13317 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13318 } else if (info_var->opcode == OP_REGVAR) {
13320 reg1 = info_var->dreg;
13322 g_assert_not_reached ();
13324 reg2 = alloc_ireg (cfg);
13325 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13326 /* Load the locals area address */
13327 reg3 = alloc_ireg (cfg);
13328 if (locals_var->opcode == OP_REGOFFSET) {
13329 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13330 } else if (locals_var->opcode == OP_REGVAR) {
13331 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13333 g_assert_not_reached ();
13335 /* Compute the address */
13336 ins->opcode = OP_PADD;
13340 mono_bblock_insert_before_ins (bb, ins, load3);
13341 mono_bblock_insert_before_ins (bb, load3, load2);
13343 mono_bblock_insert_before_ins (bb, load2, load);
13345 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is basereg + offset */
13347 ins->opcode = OP_ADD_IMM;
13348 ins->sreg1 = var->inst_basereg;
13349 ins->inst_imm = var->inst_offset;
13352 *need_local_opts = TRUE;
13353 spec = INS_INFO (ins->opcode);
/* All IL-level opcodes should have been lowered by now */
13356 if (ins->opcode < MONO_CEE_LAST) {
13357 mono_print_ins (ins);
13358 g_assert_not_reached ();
13362 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13366 if (MONO_IS_STORE_MEMBASE (ins)) {
13367 tmp_reg = ins->dreg;
13368 ins->dreg = ins->sreg2;
13369 ins->sreg2 = tmp_reg;
/* spec2 is the swapped spec matching the swapped regs; restored below */
13372 spec2 [MONO_INST_DEST] = ' ';
13373 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13374 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13375 spec2 [MONO_INST_SRC3] = ' ';
13377 } else if (MONO_IS_STORE_MEMINDEX (ins))
13378 g_assert_not_reached ();
13383 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13384 printf ("\t %.3s %d", spec, ins->dreg);
13385 num_sregs = mono_inst_get_src_registers (ins, sregs);
13386 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13387 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
13394 regtype = spec [MONO_INST_DEST];
13395 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13398 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13399 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13400 MonoInst *store_ins;
13402 MonoInst *def_ins = ins;
13403 int dreg = ins->dreg; /* The original vreg */
13405 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13407 if (var->opcode == OP_REGVAR) {
13408 ins->dreg = var->dreg;
13409 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13411 * Instead of emitting a load+store, use a _membase opcode.
13413 g_assert (var->opcode == OP_REGOFFSET);
13414 if (ins->opcode == OP_MOVE) {
13418 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13419 ins->inst_basereg = var->inst_basereg;
13420 ins->inst_offset = var->inst_offset;
13423 spec = INS_INFO (ins->opcode);
13427 g_assert (var->opcode == OP_REGOFFSET);
13429 prev_dreg = ins->dreg;
13431 /* Invalidate any previous lvreg for this vreg */
13432 vreg_to_lvreg [ins->dreg] = 0;
13436 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13438 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; a spill store is emitted after it */
13441 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13443 #if SIZEOF_REGISTER != 8
13444 if (regtype == 'l') {
13445 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13446 mono_bblock_insert_after_ins (bb, ins, store_ins);
13447 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13448 mono_bblock_insert_after_ins (bb, ins, store_ins);
13449 def_ins = store_ins;
13454 g_assert (store_opcode != OP_STOREV_MEMBASE);
13456 /* Try to fuse the store into the instruction itself */
13457 /* FIXME: Add more instructions */
13458 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13459 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13460 ins->inst_imm = ins->inst_c0;
13461 ins->inst_destbasereg = var->inst_basereg;
13462 ins->inst_offset = var->inst_offset;
13463 spec = INS_INFO (ins->opcode);
13464 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13465 ins->opcode = store_opcode;
13466 ins->inst_destbasereg = var->inst_basereg;
13467 ins->inst_offset = var->inst_offset;
13471 tmp_reg = ins->dreg;
13472 ins->dreg = ins->sreg2;
13473 ins->sreg2 = tmp_reg;
13476 spec2 [MONO_INST_DEST] = ' ';
13477 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13478 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13479 spec2 [MONO_INST_SRC3] = ' ';
13481 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13482 // FIXME: The backends expect the base reg to be in inst_basereg
13483 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13485 ins->inst_basereg = var->inst_basereg;
13486 ins->inst_offset = var->inst_offset;
13487 spec = INS_INFO (ins->opcode);
13489 /* printf ("INS: "); mono_print_ins (ins); */
13490 /* Create a store instruction */
13491 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13493 /* Insert it after the instruction */
13494 mono_bblock_insert_after_ins (bb, ins, store_ins);
13496 def_ins = store_ins;
13499 * We can't assign ins->dreg to var->dreg here, since the
13500 * sregs could use it. So set a flag, and do it after
13503 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13504 dest_has_lvreg = TRUE;
13509 if (def_ins && !live_range_start [dreg]) {
13510 live_range_start [dreg] = def_ins;
13511 live_range_start_bb [dreg] = bb;
13514 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13517 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13518 tmp->inst_c1 = dreg;
13519 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
13526 num_sregs = mono_inst_get_src_registers (ins, sregs);
13527 for (srcindex = 0; srcindex < 3; ++srcindex) {
13528 regtype = spec [MONO_INST_SRC1 + srcindex];
13529 sreg = sregs [srcindex];
13531 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13532 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13533 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13534 MonoInst *use_ins = ins;
13535 MonoInst *load_ins;
13536 guint32 load_opcode;
13538 if (var->opcode == OP_REGVAR) {
13539 sregs [srcindex] = var->dreg;
13540 //mono_inst_set_src_registers (ins, sregs);
13541 live_range_end [sreg] = use_ins;
13542 live_range_end_bb [sreg] = bb;
13544 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13547 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13548 /* var->dreg is a hreg */
13549 tmp->inst_c1 = sreg;
13550 mono_bblock_insert_after_ins (bb, ins, tmp);
13556 g_assert (var->opcode == OP_REGOFFSET);
13558 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13560 g_assert (load_opcode != OP_LOADV_MEMBASE);
13562 if (vreg_to_lvreg [sreg]) {
13563 g_assert (vreg_to_lvreg [sreg] != -1);
13565 /* The variable is already loaded to an lvreg */
13566 if (G_UNLIKELY (cfg->verbose_level > 2))
13567 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13568 sregs [srcindex] = vreg_to_lvreg [sreg];
13569 //mono_inst_set_src_registers (ins, sregs);
13573 /* Try to fuse the load into the instruction */
13574 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13575 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13576 sregs [0] = var->inst_basereg;
13577 //mono_inst_set_src_registers (ins, sregs);
13578 ins->inst_offset = var->inst_offset;
13579 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13580 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13581 sregs [1] = var->inst_basereg;
13582 //mono_inst_set_src_registers (ins, sregs);
13583 ins->inst_offset = var->inst_offset;
13585 if (MONO_IS_REAL_MOVE (ins)) {
13586 ins->opcode = OP_NOP;
13589 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg and cache the mapping */
13591 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13593 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13594 if (var->dreg == prev_dreg) {
13596 * sreg refers to the value loaded by the load
13597 * emitted below, but we need to use ins->dreg
13598 * since it refers to the store emitted earlier.
13602 g_assert (sreg != -1);
13603 vreg_to_lvreg [var->dreg] = sreg;
13604 g_assert (lvregs_len < 1024);
13605 lvregs [lvregs_len ++] = var->dreg;
13609 sregs [srcindex] = sreg;
13610 //mono_inst_set_src_registers (ins, sregs);
13612 #if SIZEOF_REGISTER != 8
13613 if (regtype == 'l') {
13614 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13615 mono_bblock_insert_before_ins (bb, ins, load_ins);
13616 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13617 mono_bblock_insert_before_ins (bb, ins, load_ins);
13618 use_ins = load_ins;
13623 #if SIZEOF_REGISTER == 4
13624 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13626 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13627 mono_bblock_insert_before_ins (bb, ins, load_ins);
13628 use_ins = load_ins;
13632 if (var->dreg < orig_next_vreg) {
13633 live_range_end [var->dreg] = use_ins;
13634 live_range_end_bb [var->dreg] = bb;
13637 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13640 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13641 tmp->inst_c1 = var->dreg;
13642 mono_bblock_insert_after_ins (bb, ins, tmp);
13646 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: now the sregs are processed, the dreg's
 * lvreg mapping can be recorded */
13648 if (dest_has_lvreg) {
13649 g_assert (ins->dreg != -1);
13650 vreg_to_lvreg [prev_dreg] = ins->dreg;
13651 g_assert (lvregs_len < 1024);
13652 lvregs [lvregs_len ++] = prev_dreg;
13653 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes above */
13657 tmp_reg = ins->dreg;
13658 ins->dreg = ins->sreg2;
13659 ins->sreg2 = tmp_reg;
13662 if (MONO_IS_CALL (ins)) {
/* A call can clobber the lvregs, so the cache is invalid afterwards */
13663 /* Clear vreg_to_lvreg array */
13664 for (i = 0; i < lvregs_len; i++)
13665 vreg_to_lvreg [lvregs [i]] = 0;
13667 } else if (ins->opcode == OP_NOP) {
13669 MONO_INST_NULLIFY_SREGS (ins);
13672 if (cfg->verbose_level > 2)
13673 mono_print_ins_index (1, ins);
13676 /* Extend the live range based on the liveness info */
13677 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13678 for (i = 0; i < cfg->num_varinfo; i ++) {
13679 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13681 if (vreg_is_volatile (cfg, vi->vreg))
13682 /* The liveness info is incomplete */
13685 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13686 /* Live from at least the first ins of this bb */
13687 live_range_start [vi->vreg] = bb->code;
13688 live_range_start_bb [vi->vreg] = bb;
13691 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13692 /* Live at least until the last ins of this bb */
13693 live_range_end [vi->vreg] = bb->last_ins;
13694 live_range_end_bb [vi->vreg] = bb;
13700 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13702 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13703 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13705 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13706 for (i = 0; i < cfg->num_varinfo; ++i) {
13707 int vreg = MONO_VARINFO (cfg, i)->vreg;
13710 if (live_range_start [vreg]) {
13711 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13713 ins->inst_c1 = vreg;
13714 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13716 if (live_range_end [vreg]) {
13717 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13719 ins->inst_c1 = vreg;
13720 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13721 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13723 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13729 if (cfg->gsharedvt_locals_var_ins) {
13730 /* Nullify if unused */
13731 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13732 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13735 g_free (live_range_start);
13736 g_free (live_range_end);
13737 g_free (live_range_start_bb);
13738 g_free (live_range_end_bb);
13743 * - use 'iadd' instead of 'int_add'
13744 * - handling ovf opcodes: decompose in method_to_ir.
13745 * - unify iregs/fregs
13746 * -> partly done, the missing parts are:
13747 * - a more complete unification would involve unifying the hregs as well, so
13748 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13749 * would no longer map to the machine hregs, so the code generators would need to
13750 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13751 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13752 * fp/non-fp branches speeds it up by about 15%.
13753 * - use sext/zext opcodes instead of shifts
13755 * - get rid of TEMPLOADs if possible and use vregs instead
13756 * - clean up usage of OP_P/OP_ opcodes
13757 * - cleanup usage of DUMMY_USE
13758 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13760 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13761 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13762 * - make sure handle_stack_args () is called before the branch is emitted
13763 * - when the new IR is done, get rid of all unused stuff
13764 * - COMPARE/BEQ as separate instructions or unify them ?
13765 * - keeping them separate allows specialized compare instructions like
13766 * compare_imm, compare_membase
13767 * - most back ends unify fp compare+branch, fp compare+ceq
13768 * - integrate mono_save_args into inline_method
13769 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13770 * - handle long shift opts on 32 bit platforms somehow: they require
13771 * 3 sregs (2 for arg1 and 1 for arg2)
13772 * - make byref a 'normal' type.
13773 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13774 * variable if needed.
13775 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13776 * like inline_method.
13777 * - remove inlining restrictions
13778 * - fix LNEG and enable cfold of INEG
13779 * - generalize x86 optimizations like ldelema as a peephole optimization
13780 * - add store_mem_imm for amd64
13781 * - optimize the loading of the interruption flag in the managed->native wrappers
13782 * - avoid special handling of OP_NOP in passes
13783 * - move code inserting instructions into one function/macro.
13784 * - try a coalescing phase after liveness analysis
13785 * - add float -> vreg conversion + local optimizations on !x86
13786 * - figure out how to handle decomposed branches during optimizations, ie.
13787 * compare+branch, op_jump_table+op_br etc.
13788 * - promote RuntimeXHandles to vregs
13789 * - vtype cleanups:
13790 * - add a NEW_VARLOADA_VREG macro
13791 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13792 * accessing vtype fields.
13793 * - get rid of I8CONST on 64 bit platforms
13794 * - dealing with the increase in code size due to branches created during opcode
13796 * - use extended basic blocks
13797 * - all parts of the JIT
13798 * - handle_global_vregs () && local regalloc
13799 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13800 * - sources of increase in code size:
13803 * - isinst and castclass
13804 * - lvregs not allocated to global registers even if used multiple times
13805 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
 *   accurate.
13807 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13808 * - add all micro optimizations from the old JIT
13809 * - put tree optimizations into the deadce pass
13810 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13811 * specific function.
13812 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13813 * fcompare + branchCC.
13814 * - create a helper function for allocating a stack slot, taking into account
13815 * MONO_CFG_HAS_SPILLUP.
13817 * - merge the ia64 switch changes.
13818 * - optimize mono_regstate2_alloc_int/float.
13819 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13820 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13821 * parts of the tree could be separated by other instructions, killing the tree
13822 * arguments, or stores killing loads etc. Also, should we fold loads into other
13823 * instructions if the result of the load is used multiple times ?
13824 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13825 * - LAST MERGE: 108395.
13826 * - when returning vtypes in registers, generate IR and append it to the end of the
13827 * last bb instead of doing it in the epilog.
13828 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13836 - When to decompose opcodes:
13837 - earlier: this makes some optimizations hard to implement, since the low level IR
13838 no longer contains the necessary information. But it is easier to do.
13839 - later: harder to implement, enables more optimizations.
13840 - Branches inside bblocks:
13841 - created when decomposing complex opcodes.
13842 - branches to another bblock: harmless, but not tracked by the branch
13843 optimizations, so need to branch to a label at the start of the bblock.
13844 - branches to inside the same bblock: very problematic, trips up the local
13845 reg allocator. Can be fixed by splitting the current bblock, but that is a
13846 complex operation, since some local vregs can become global vregs etc.
13847 - Local/global vregs:
13848 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13849 local register allocator.
13850 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13851 structure, created by mono_create_var (). Assigned to hregs or the stack by
13852 the global register allocator.
13853 - When to do optimizations like alu->alu_imm:
13854 - earlier -> saves work later on since the IR will be smaller/simpler
13855 - later -> can work on more instructions
13856 - Handling of valuetypes:
13857 - When a vtype is pushed on the stack, a new temporary is created, an
13858 instruction computing its address (LDADDR) is emitted and pushed on
13859 the stack. Need to optimize cases when the vtype is used immediately as in
13860 argument passing, stloc etc.
13861 - Instead of the to_end stuff in the old JIT, simply call the function handling
13862 the values on the stack before emitting the last instruction of the bb.
13865 #endif /* DISABLE_JIT */