2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whether 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
159 * Instruction metadata
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
199 #define MONO_INIT_VARINFO(vi,id) do { \
200 (vi)->range.first_use.pos.bid = 0xffff; \
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *   Map TYPE to the move opcode used to copy a value of that type
 *   between virtual registers.  Enums are mapped through their base
 *   type; generic instances through their container class.
 * NOTE(review): this listing is elided — the return type, several case
 * labels and the return statements between the visible lines are
 * missing from this view; do not treat the block as complete.
 */
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_replace_type (type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
/* 64-bit-only handling follows (elided) */
306 #if SIZEOF_REGISTER == 8
315 case MONO_TYPE_VALUETYPE:
/* enums move like their underlying integral type */
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
/* type variables are only legal under generic sharing */
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug dump of basic block BB prefixed with MSG: its in-edges,
 *   out-edges (block number and dfn) and every instruction in the block.
 * NOTE(review): elided listing — the return type, local declarations,
 * braces and the "OUT:" printf between in- and out-edge loops are
 * missing from this view.
 */
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
368 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
/*
 * break_on_unverified:
 *   Debugger hook: when the --break-on-unverified debug option is set,
 *   trap here so a debugger stops at the point unverifiable IL is found.
 * NOTE(review): elided listing — the statement guarded by the if
 * (presumably a breakpoint trap such as G_BREAKPOINT ()) and the
 * braces are missing from this view.
 */
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
384 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
395 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instantiations might be verifiable, and some might not be, e.g.
427 * foo<T> (int i) { ldarg.0; box T; }
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 #if SIZEOF_REGISTER == 8
463 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
464 /* FIXME: Need to add many more cases */ \
465 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
467 int dr = alloc_preg (cfg); \
468 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
469 (ins)->sreg2 = widen->dreg; \
473 #define ADD_WIDEN_OP(ins, arg1, arg2)
476 #define ADD_BINOP(op) do { \
477 MONO_INST_NEW (cfg, ins, (op)); \
479 ins->sreg1 = sp [0]->dreg; \
480 ins->sreg2 = sp [1]->dreg; \
481 type_from_op (ins, sp [0], sp [1]); \
483 /* Have to insert a widening op */ \
484 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
485 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
486 MONO_ADD_INS ((cfg)->cbb, (ins)); \
487 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
490 #define ADD_UNOP(op) do { \
491 MONO_INST_NEW (cfg, ins, (op)); \
493 ins->sreg1 = sp [0]->dreg; \
494 type_from_op (ins, sp [0], NULL); \
496 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
497 MONO_ADD_INS ((cfg)->cbb, (ins)); \
498 *sp++ = mono_decompose_opcode (cfg, ins); \
501 #define ADD_BINCOND(next_block) do { \
504 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
505 cmp->sreg1 = sp [0]->dreg; \
506 cmp->sreg2 = sp [1]->dreg; \
507 type_from_op (cmp, sp [0], sp [1]); \
509 type_from_op (ins, sp [0], sp [1]); \
510 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
511 GET_BBLOCK (cfg, tblock, target); \
512 link_bblock (cfg, bblock, tblock); \
513 ins->inst_true_bb = tblock; \
514 if ((next_block)) { \
515 link_bblock (cfg, bblock, (next_block)); \
516 ins->inst_false_bb = (next_block); \
517 start_new_bblock = 1; \
519 GET_BBLOCK (cfg, tblock, ip); \
520 link_bblock (cfg, bblock, tblock); \
521 ins->inst_false_bb = tblock; \
522 start_new_bblock = 2; \
524 if (sp != stack_start) { \
525 handle_stack_args (cfg, stack_start, sp - stack_start); \
526 CHECK_UNVERIFIABLE (cfg); \
528 MONO_ADD_INS (bblock, cmp); \
529 MONO_ADD_INS (bblock, ins); \
533 * link_bblock: Links two basic blocks
535 * links two basic blocks in the control flow graph, the 'from'
536 * argument is the starting block and the 'to' argument is the block
537 * the control flow ends to after 'from'.
/*
 * NOTE(review): elided listing — the return type, braces, the debug
 * verbosity guard, the duplicate-edge "found" bookkeeping and the
 * statements that install the grown edge arrays are missing from this
 * view; the visible lines only sketch the logic.
 */
540 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 MonoBasicBlock **newa;
/* debug tracing of the edge being added */
546 if (from->cil_code) {
548 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
550 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
553 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
555 printf ("edge from entry to exit\n");
/* skip if 'to' is already among 'from''s successors */
560 for (i = 0; i < from->out_count; ++i) {
561 if (to == from->out_bb [i]) {
/* grow the out-edge array of 'from' by one */
567 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
568 for (i = 0; i < from->out_count; ++i) {
569 newa [i] = from->out_bb [i];
/* skip if 'from' is already among 'to''s predecessors */
577 for (i = 0; i < to->in_count; ++i) {
578 if (from == to->in_bb [i]) {
/* grow the in-edge array of 'to' by one */
584 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
585 for (i = 0; i < to->in_count; ++i) {
586 newa [i] = to->in_bb [i];
595 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
597 link_bblock (cfg, from, to);
601 * mono_find_block_region:
603 * We mark each basic block with a region ID. We use that to avoid BB
604 * optimizations when blocks are in different regions.
607 * A region token that encodes where this region is, and information
608 * about the clause owner for this block.
610 * The region encodes the try/catch/filter clause that owns this block
611 * as well as the type. -1 is a special value that represents a block
612 * that is in none of try/catch/filter.
615 mono_find_block_region (MonoCompile *cfg, int offset)
617 MonoMethodHeader *header = cfg->header;
618 MonoExceptionClause *clause;
621 for (i = 0; i < header->num_clauses; ++i) {
622 clause = &header->clauses [i];
623 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
624 (offset < (clause->handler_offset)))
625 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
627 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
628 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
629 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
630 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
631 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
633 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
636 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
637 return ((i + 1) << 8) | clause->flags;
644 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
646 MonoMethodHeader *header = cfg->header;
647 MonoExceptionClause *clause;
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
653 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
654 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
655 if (clause->flags == type)
656 res = g_list_append (res, clause);
663 mono_create_spvar_for_region (MonoCompile *cfg, int region)
667 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
671 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
672 /* prevent it from being register allocated */
673 var->flags |= MONO_INST_VOLATILE;
675 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
679 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
681 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
685 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
689 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
693 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
694 /* prevent it from being register allocated */
695 var->flags |= MONO_INST_VOLATILE;
697 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
703 * Returns the type used in the eval stack when @type is loaded.
704 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * NOTE(review): elided listing — the return type, braces, byref check,
 * many case labels and the break statements between the visible lines
 * are missing from this view.
 */
707 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
711 type = mini_replace_type (type);
712 inst->klass = klass = mono_class_from_mono_type (type);
/* byref arguments evaluate as managed pointers */
714 inst->type = STACK_MP;
719 switch (type->type) {
721 inst->type = STACK_INV;
725 case MONO_TYPE_BOOLEAN:
731 inst->type = STACK_I4;
736 case MONO_TYPE_FNPTR:
737 inst->type = STACK_PTR;
739 case MONO_TYPE_CLASS:
740 case MONO_TYPE_STRING:
741 case MONO_TYPE_OBJECT:
742 case MONO_TYPE_SZARRAY:
743 case MONO_TYPE_ARRAY:
744 inst->type = STACK_OBJ;
748 inst->type = STACK_I8;
752 inst->type = STACK_R8;
754 case MONO_TYPE_VALUETYPE:
/* enums load as their underlying integral type */
755 if (type->data.klass->enumtype) {
756 type = mono_class_enum_basetype (type->data.klass);
760 inst->type = STACK_VTYPE;
763 case MONO_TYPE_TYPEDBYREF:
764 inst->klass = mono_defaults.typed_reference_class;
765 inst->type = STACK_VTYPE;
767 case MONO_TYPE_GENERICINST:
768 type = &type->data.generic_class->container_class->byval_arg;
/* type variables: valid only under generic sharing */
772 g_assert (cfg->generic_sharing_context);
773 if (mini_is_gsharedvt_type (cfg, type)) {
774 g_assert (cfg->gsharedvt);
775 inst->type = STACK_VTYPE;
777 inst->type = STACK_OBJ;
781 g_error ("unknown type 0x%02x in eval stack type", type->type);
786 * The following tables are used to quickly validate the IL code in type_from_op ().
789 bin_num_table [STACK_MAX] [STACK_MAX] = {
790 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
793 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
794 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
795 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
796 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
797 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
802 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
805 /* reduce the size of this table */
807 bin_int_table [STACK_MAX] [STACK_MAX] = {
808 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
809 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
819 bin_comp_table [STACK_MAX] [STACK_MAX] = {
820 /* Inv i L p F & O vt */
822 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
823 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
824 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
825 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
826 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
827 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
828 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
831 /* reduce the size of this table */
833 shift_table [STACK_MAX] [STACK_MAX] = {
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
845 * Tables to map from the non-specific opcode to the matching
846 * type-specific opcode.
848 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
850 binops_op_map [STACK_MAX] = {
851 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
854 /* handles from CEE_NEG to CEE_CONV_U8 */
856 unops_op_map [STACK_MAX] = {
857 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
860 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
862 ovfops_op_map [STACK_MAX] = {
863 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
866 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
868 ovf2ops_op_map [STACK_MAX] = {
869 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
872 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
874 ovf3ops_op_map [STACK_MAX] = {
875 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
878 /* handles from CEE_BEQ to CEE_BLT_UN */
880 beqops_op_map [STACK_MAX] = {
881 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
884 /* handles from CEE_CEQ to CEE_CLT_UN */
886 ceqops_op_map [STACK_MAX] = {
887 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
891 * Sets ins->type (the type on the eval stack) according to the
892 * type of the opcode and the arguments to it.
893 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
895 * FIXME: this function sets ins->type unconditionally in some cases, but
896 * it should set it to invalid for some types (a conv.x on an object)
899 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
901 switch (ins->opcode) {
908 /* FIXME: check unverifiable args for STACK_MP */
909 ins->type = bin_num_table [src1->type] [src2->type];
910 ins->opcode += binops_op_map [ins->type];
917 ins->type = bin_int_table [src1->type] [src2->type];
918 ins->opcode += binops_op_map [ins->type];
923 ins->type = shift_table [src1->type] [src2->type];
924 ins->opcode += binops_op_map [ins->type];
929 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
930 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
931 ins->opcode = OP_LCOMPARE;
932 else if (src1->type == STACK_R8)
933 ins->opcode = OP_FCOMPARE;
935 ins->opcode = OP_ICOMPARE;
937 case OP_ICOMPARE_IMM:
938 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
939 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
940 ins->opcode = OP_LCOMPARE_IMM;
952 ins->opcode += beqops_op_map [src1->type];
955 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
956 ins->opcode += ceqops_op_map [src1->type];
962 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
963 ins->opcode += ceqops_op_map [src1->type];
967 ins->type = neg_table [src1->type];
968 ins->opcode += unops_op_map [ins->type];
971 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
972 ins->type = src1->type;
974 ins->type = STACK_INV;
975 ins->opcode += unops_op_map [ins->type];
981 ins->type = STACK_I4;
982 ins->opcode += unops_op_map [src1->type];
985 ins->type = STACK_R8;
986 switch (src1->type) {
989 ins->opcode = OP_ICONV_TO_R_UN;
992 ins->opcode = OP_LCONV_TO_R_UN;
996 case CEE_CONV_OVF_I1:
997 case CEE_CONV_OVF_U1:
998 case CEE_CONV_OVF_I2:
999 case CEE_CONV_OVF_U2:
1000 case CEE_CONV_OVF_I4:
1001 case CEE_CONV_OVF_U4:
1002 ins->type = STACK_I4;
1003 ins->opcode += ovf3ops_op_map [src1->type];
1005 case CEE_CONV_OVF_I_UN:
1006 case CEE_CONV_OVF_U_UN:
1007 ins->type = STACK_PTR;
1008 ins->opcode += ovf2ops_op_map [src1->type];
1010 case CEE_CONV_OVF_I1_UN:
1011 case CEE_CONV_OVF_I2_UN:
1012 case CEE_CONV_OVF_I4_UN:
1013 case CEE_CONV_OVF_U1_UN:
1014 case CEE_CONV_OVF_U2_UN:
1015 case CEE_CONV_OVF_U4_UN:
1016 ins->type = STACK_I4;
1017 ins->opcode += ovf2ops_op_map [src1->type];
1020 ins->type = STACK_PTR;
1021 switch (src1->type) {
1023 ins->opcode = OP_ICONV_TO_U;
1027 #if SIZEOF_VOID_P == 8
1028 ins->opcode = OP_LCONV_TO_U;
1030 ins->opcode = OP_MOVE;
1034 ins->opcode = OP_LCONV_TO_U;
1037 ins->opcode = OP_FCONV_TO_U;
1043 ins->type = STACK_I8;
1044 ins->opcode += unops_op_map [src1->type];
1046 case CEE_CONV_OVF_I8:
1047 case CEE_CONV_OVF_U8:
1048 ins->type = STACK_I8;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_U8_UN:
1052 case CEE_CONV_OVF_I8_UN:
1053 ins->type = STACK_I8;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1058 ins->type = STACK_R8;
1059 ins->opcode += unops_op_map [src1->type];
1062 ins->type = STACK_R8;
1066 ins->type = STACK_I4;
1067 ins->opcode += ovfops_op_map [src1->type];
1070 case CEE_CONV_OVF_I:
1071 case CEE_CONV_OVF_U:
1072 ins->type = STACK_PTR;
1073 ins->opcode += ovfops_op_map [src1->type];
1076 case CEE_ADD_OVF_UN:
1078 case CEE_MUL_OVF_UN:
1080 case CEE_SUB_OVF_UN:
1081 ins->type = bin_num_table [src1->type] [src2->type];
1082 ins->opcode += ovfops_op_map [src1->type];
1083 if (ins->type == STACK_R8)
1084 ins->type = STACK_INV;
1086 case OP_LOAD_MEMBASE:
1087 ins->type = STACK_PTR;
1089 case OP_LOADI1_MEMBASE:
1090 case OP_LOADU1_MEMBASE:
1091 case OP_LOADI2_MEMBASE:
1092 case OP_LOADU2_MEMBASE:
1093 case OP_LOADI4_MEMBASE:
1094 case OP_LOADU4_MEMBASE:
1095 ins->type = STACK_PTR;
1097 case OP_LOADI8_MEMBASE:
1098 ins->type = STACK_I8;
1100 case OP_LOADR4_MEMBASE:
1101 case OP_LOADR8_MEMBASE:
1102 ins->type = STACK_R8;
1105 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1109 if (ins->type == STACK_MP)
1110 ins->klass = mono_defaults.object_class;
1115 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1121 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *   Sanity-check that the evaluation-stack values in ARGS are
 *   compatible with SIG's parameter types.
 * NOTE(review): elided listing — the return type, the 'this' check,
 * most case labels and the return statements between the visible
 * lines are missing from this view.
 */
1126 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1130 switch (args->type) {
1140 for (i = 0; i < sig->param_count; ++i) {
1141 switch (args [i].type) {
1145 if (!sig->params [i]->byref)
1149 if (sig->params [i]->byref)
1151 switch (sig->params [i]->type) {
1152 case MONO_TYPE_CLASS:
1153 case MONO_TYPE_STRING:
1154 case MONO_TYPE_OBJECT:
1155 case MONO_TYPE_SZARRAY:
1156 case MONO_TYPE_ARRAY:
/* floating point stack entries must match R4/R8 params */
1163 if (sig->params [i]->byref)
1165 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1174 /*if (!param_table [args [i].type] [sig->params [i]->type])
1182 * When we need a pointer to the current domain many times in a method, we
1183 * call mono_domain_get() once and we store the result in a local variable.
1184 * This function returns the variable that represents the MonoDomain*.
1186 inline static MonoInst *
1187 mono_get_domainvar (MonoCompile *cfg)
1189 if (!cfg->domainvar)
1190 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1191 return cfg->domainvar;
1195 * The got_var contains the address of the Global Offset Table when AOT
1199 mono_get_got_var (MonoCompile *cfg)
1201 #ifdef MONO_ARCH_NEED_GOT_VAR
1202 if (!cfg->compile_aot)
1204 if (!cfg->got_var) {
1205 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1207 return cfg->got_var;
1214 mono_get_vtable_var (MonoCompile *cfg)
1216 g_assert (cfg->generic_sharing_context);
1218 if (!cfg->rgctx_var) {
1219 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1220 /* force the var to be stack allocated */
1221 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1224 return cfg->rgctx_var;
1228 type_from_stack_type (MonoInst *ins) {
1229 switch (ins->type) {
1230 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1231 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1232 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1233 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1235 return &ins->klass->this_arg;
1236 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1237 case STACK_VTYPE: return &ins->klass->byval_arg;
1239 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Map MonoType T (after stripping enum wrappers) to its STACK_*
 *   evaluation-stack category.
 * NOTE(review): elided listing — the switch statement itself, many case
 * labels and the return statements between the visible lines are
 * missing from this view.
 */
1244 static G_GNUC_UNUSED int
1245 type_to_stack_type (MonoType *t)
1247 t = mono_type_get_underlying_type (t);
1251 case MONO_TYPE_BOOLEAN:
1254 case MONO_TYPE_CHAR:
1261 case MONO_TYPE_FNPTR:
1263 case MONO_TYPE_CLASS:
1264 case MONO_TYPE_STRING:
1265 case MONO_TYPE_OBJECT:
1266 case MONO_TYPE_SZARRAY:
1267 case MONO_TYPE_ARRAY:
1275 case MONO_TYPE_VALUETYPE:
1276 case MONO_TYPE_TYPEDBYREF:
1278 case MONO_TYPE_GENERICINST:
1279 if (mono_type_generic_inst_is_valuetype (t))
1285 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element class it
 *   accesses.
 * NOTE(review): elided listing — the return type, the switch statement
 * and most CEE_* case labels paired with the visible returns are
 * missing from this view.
 */
1292 array_access_to_klass (int opcode)
1296 return mono_defaults.byte_class;
1298 return mono_defaults.uint16_class;
1301 return mono_defaults.int_class;
1304 return mono_defaults.sbyte_class;
1307 return mono_defaults.int16_class;
1310 return mono_defaults.int32_class;
1312 return mono_defaults.uint32_class;
1315 return mono_defaults.int64_class;
1318 return mono_defaults.single_class;
1321 return mono_defaults.double_class;
1322 case CEE_LDELEM_REF:
1323 case CEE_STELEM_REF:
1324 return mono_defaults.object_class;
1326 g_assert_not_reached ();
1332 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable to spill stack SLOT of type ins->type into,
 *   reusing a cached one from cfg->intvars for shareable stack types.
 * NOTE(review): elided listing — the return type, locals, the case
 * labels of the switch and the final return are missing from this view.
 */
1335 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1340 /* inlining can result in deeper stacks */
1341 if (slot >= cfg->header->max_stack)
1342 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1344 pos = ins->type - 1 + slot * STACK_MAX;
1346 switch (ins->type) {
/* shareable types: reuse the cached var for this (slot, type) pair */
1353 if ((vnum = cfg->intvars [pos]))
1354 return cfg->varinfo [vnum];
1355 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1356 cfg->intvars [pos] = res->inst_c0;
1359 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When AOT-compiling, record the (image, token) pair that produced KEY in
 * cfg->token_info_hash so the AOT compiler can later emit a token-based
 * reference instead of a direct pointer.
 */
1365 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1368 * Don't use this if a generic_context is set, since that means AOT can't
1369 * look up the method using just the image+token.
1370 * table == 0 means this is a reference made from a wrapper.
1372 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1373 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1374 jump_info_token->image = image;
1375 jump_info_token->token = token;
1376 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1381 * This function is called to handle items that are left on the evaluation stack
1382 * at basic block boundaries. What happens is that we save the values to local variables
1383 * and we reload them later when first entering the target basic block (with the
1384 * handle_loaded_temps () function).
1385 * A single joint point will use the same variables (stored in the array bb->out_stack or
1386 * bb->in_stack, if the basic block is before or after the joint point).
1388 * This function needs to be called _before_ emitting the last instruction of
1389 * the bb (i.e. before emitting a branch).
1390 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* See the comment block above: spills the evaluation stack to locals at a
 * bblock boundary and records those locals in bb->out_stack / successor
 * in_stack. NOTE(review): several lines (continue/break statements, closing
 * braces, some declarations) are elided in this view. */
1393 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1396 MonoBasicBlock *bb = cfg->cbb;
1397 MonoBasicBlock *outb;
1398 MonoInst *inst, **locals;
1403 if (cfg->verbose_level > 3)
1404 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock with a non-empty stack: pick the variables
 * (out_stack) that will carry the values. */
1405 if (!bb->out_scount) {
1406 bb->out_scount = count;
1407 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor that already has one. */
1409 for (i = 0; i < bb->out_count; ++i) {
1410 outb = bb->out_bb [i];
1411 /* exception handlers are linked, but they should not be considered for stack args */
1412 if (outb->flags & BB_EXCEPTION_HANDLER)
1414 //printf (" %d", outb->block_num);
1415 if (outb->in_stack) {
1417 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh slots. */
1423 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1424 for (i = 0; i < count; ++i) {
1426 * try to reuse temps already allocated for this purpouse, if they occupy the same
1427 * stack slot and if they are of the same type.
1428 * This won't cause conflicts since if 'local' is used to
1429 * store one of the values in the in_stack of a bblock, then
1430 * the same variable will be used for the same outgoing stack
1432 * This doesn't work when inlining methods, since the bblocks
1433 * in the inlined methods do not inherit their in_stack from
1434 * the bblock they are inlined to. See bug #58863 for an
1437 if (cfg->inlined_method)
1438 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1440 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of every (non-handler)
 * successor; a depth mismatch at a join point is unverifiable IL. */
1445 for (i = 0; i < bb->out_count; ++i) {
1446 outb = bb->out_bb [i];
1447 /* exception handlers are linked, but they should not be considered for stack args */
1448 if (outb->flags & BB_EXCEPTION_HANDLER)
1450 if (outb->in_scount) {
1451 if (outb->in_scount != bb->out_scount) {
1452 cfg->unverifiable = TRUE;
1455 continue; /* check they are the same locals */
1457 outb->in_scount = count;
1458 outb->in_stack = bb->out_stack;
/* Emit the actual stores of the stack values into the chosen locals,
 * and replace the stack entries with the locals. */
1461 locals = bb->out_stack;
1463 for (i = 0; i < count; ++i) {
1464 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1465 inst->cil_code = sp [i]->cil_code;
1466 sp [i] = locals [i];
1467 if (cfg->verbose_level > 3)
1468 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1472 * It is possible that the out bblocks already have in_stack assigned, and
1473 * the in_stacks differ. In this case, we will store to all the different
1480 /* Find a bblock which has a different in_stack */
1482 while (bindex < bb->out_count) {
1483 outb = bb->out_bb [bindex];
1484 /* exception handlers are linked, but they should not be considered for stack args */
1485 if (outb->flags & BB_EXCEPTION_HANDLER) {
1489 if (outb->in_stack != locals) {
/* Store into this successor's distinct in_stack as well. */
1490 for (i = 0; i < count; ++i) {
1491 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1492 inst->cil_code = sp [i]->cil_code;
1493 sp [i] = locals [i];
1494 if (cfg->verbose_level > 3)
1495 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1497 locals = outb->in_stack;
1506 /* Emit code which loads interface_offsets [klass->interface_id]
1507 * The array is stored in memory before vtable.
/* AOT: interface_id is unknown at compile time, so materialize it via a
 * MONO_PATCH_INFO_ADJUSTED_IID constant and index at runtime; JIT: fold
 * the (negative) offset into a single membase load. */
1510 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1512 if (cfg->compile_aot) {
1513 int ioffset_reg = alloc_preg (cfg);
1514 int iid_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1517 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1518 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* NOTE(review): else branch/braces elided here. */
1521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that sets INTF_BIT_REG to a nonzero value iff the interface
 * bitmap located at BASE_REG+OFFSET has the bit for KLASS's interface_id
 * set. Three strategies: compressed bitmap (runtime icall), AOT (iid via
 * patch, bit computed at runtime), JIT (iid known, single byte load+mask).
 */
1526 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1528 int ibitmap_reg = alloc_preg (cfg);
1529 #ifdef COMPRESSED_INTERFACE_BITMAP
1531 MonoInst *res, *ins;
1532 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1533 MONO_ADD_INS (cfg->cbb, ins);
/* Compressed bitmaps can't be indexed inline; defer to an icall. */
1535 if (cfg->compile_aot)
1536 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1538 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1539 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1540 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1542 int ibitmap_byte_reg = alloc_preg (cfg);
1544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1546 if (cfg->compile_aot) {
/* AOT: iid is a patch constant, so compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) in emitted code. */
1547 int iid_reg = alloc_preg (cfg);
1548 int shifted_iid_reg = alloc_preg (cfg);
1549 int ibitmap_byte_address_reg = alloc_preg (cfg);
1550 int masked_iid_reg = alloc_preg (cfg);
1551 int iid_one_bit_reg = alloc_preg (cfg);
1552 int iid_bit_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1556 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1558 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is known, so byte offset and mask fold into immediates. */
1562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1569 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1570 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoClass.interface_bitmap. */
1573 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1575 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1579 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1580 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap check against MonoVTable.interface_bitmap. */
1583 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1585 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1589 * Emit code which checks whenever the interface id of @klass is smaller than
1590 * than the value given by max_iid_reg.
/* On failure: branch to FALSE_TARGET if given, otherwise throw
 * InvalidCastException via a conditional exception. */
1593 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1594 MonoBasicBlock *false_target)
1596 if (cfg->compile_aot) {
1597 int iid_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* NOTE(review): the if (false_target)/else lines appear elided here. */
1604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1606 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1609 /* Same as above, but obtains max_iid from a vtable */
1611 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1612 MonoBasicBlock *false_target)
1614 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit) then delegate. */
1616 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1617 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1620 /* Same as above, but obtains max_iid from a klass */
1622 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1623 MonoBasicBlock *false_target)
1625 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit) then delegate. */
1627 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1628 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "is instance" test using the supertypes table: load the entry of
 * the class in KLASS_REG at KLASS's inheritance depth and compare it
 * against KLASS (from KLASS_INS, an AOT class constant, or an immediate).
 * Branches to TRUE_TARGET on match; FALSE_TARGET on depth mismatch.
 */
1632 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1634 int idepth_reg = alloc_preg (cfg);
1635 int stypes_reg = alloc_preg (cfg);
1636 int stype = alloc_preg (cfg);
1638 mono_class_setup_supertypes (klass);
/* Only check idepth at runtime when it can exceed the statically-sized
 * part of the supertypes table. */
1640 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1641 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1645 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* NOTE(review): the if (klass_ins) guard line appears elided above. */
1648 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1649 } else if (cfg->compile_aot) {
1650 int const_reg = alloc_preg (cfg);
1651 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1652 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1656 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without an explicit klass instruction. */
1660 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1662 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against a vtable register: range-check the
 * interface id, then test the interface bitmap bit. Branches to
 * TRUE_TARGET on success, otherwise throws InvalidCastException.
 */
1666 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1668 int intf_reg = alloc_preg (cfg);
1670 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1671 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
/* NOTE(review): the if (true_target)/else lines appear elided here. */
1674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1676 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1680 * Variant of the above that takes a register to the class, not the vtable.
1683 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1685 int intf_bit_reg = alloc_preg (cfg);
1687 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1688 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* NOTE(review): the if (true_target)/else lines appear elided here. */
1691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1693 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class equality check: compare KLASS_REG against KLASS
 * (via KLASS_INST's register, an AOT class constant, or an immediate) and
 * throw InvalidCastException on mismatch.
 * NOTE(review): the leading if (klass_inst) guard line appears elided.
 */
1697 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1700 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1701 } else if (cfg->compile_aot) {
1702 int const_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1704 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1708 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check without a klass instruction. */
1712 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1714 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (instead of throwing like mini_emit_class_check).
 */
1718 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1720 if (cfg->compile_aot) {
1721 int const_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1723 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1731 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check. The visible code handles the array case (rank
 * check, element-class check with special handling when the element type is
 * object/Enum, vector-vs-array bounds check) and the general supertypes-
 * table walk; throws InvalidCastException on failure.
 * NOTE(review): the surrounding if/else structure (array vs. non-array
 * dispatch) is partly elided in this view.
 */
1734 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1737 int rank_reg = alloc_preg (cfg);
1738 int eclass_reg = alloc_preg (cfg);
/* The array path does not support a dynamic klass instruction. */
1740 g_assert (!klass_inst);
1741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1743 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1744 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types involving System.Object / System.Enum,
 * whose cast rules interact with enum base classes. */
1746 if (klass->cast_class == mono_defaults.object_class) {
1747 int parent_reg = alloc_preg (cfg);
1748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1749 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1750 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1751 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1752 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1753 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1754 } else if (klass->cast_class == mono_defaults.enum_class) {
1755 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1756 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1757 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1759 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1760 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY casts additionally require the object to be a vector
 * (bounds == NULL), not a multi-dimensional/bounded array. */
1763 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1764 /* Check that the object is a vector too */
1765 int bounds_reg = alloc_preg (cfg);
1766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1768 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes table (cf. isninst above), but
 * throw instead of branching on failure. */
1771 int idepth_reg = alloc_preg (cfg);
1772 int stypes_reg = alloc_preg (cfg);
1773 int stype = alloc_preg (cfg);
1775 mono_class_setup_supertypes (klass);
1777 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1778 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1780 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1783 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1784 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without a klass instruction. */
1789 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1791 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit IR that fills SIZE bytes at DESTREG+OFFSET with VAL (only 0 is
 * supported — see the assert). Small aligned sizes use a single immediate
 * store; otherwise a value register is stored in register-width chunks,
 * falling back to 4/2/1-byte tails. NOTE(review): loop/size-decrement
 * lines are elided in this view.
 */
1795 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zero-fill is implemented. */
1799 g_assert (val == 0);
1804 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1807 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1810 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1813 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1815 #if SIZEOF_REGISTER == 8
1817 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1823 val_reg = alloc_preg (cfg);
1825 if (SIZEOF_REGISTER == 8)
1826 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1828 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: byte-by-byte stores. */
1831 /* This could be optimized further if neccesary */
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1840 #if !NO_UNALIGNED_ACCESS
1841 if (SIZEOF_REGISTER == 8) {
/* Align to 8 with a 4-byte store, then store 8 bytes at a time. */
1843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit IR that copies SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET.
 * Unaligned copies go byte-by-byte; aligned copies use register-width
 * loads/stores with 4/2/1-byte tails. NOTE(review): loop headers and
 * size/offset-advance lines are elided in this view.
 */
1873 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1880 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1881 g_assert (size < 10000);
/* Unaligned source/dest: byte-by-byte copy. */
1884 /* This could be optimized further if neccesary */
1886 cur_reg = alloc_preg (cfg);
1887 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1895 #if !NO_UNALIGNED_ACCESS
1896 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets. */
1898 cur_reg = alloc_preg (cfg);
1899 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1909 cur_reg = alloc_preg (cfg);
1910 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1911 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1917 cur_reg = alloc_preg (cfg);
1918 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1925 cur_reg = alloc_preg (cfg);
1926 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR that stores SREG1 into the TLS slot identified by TLS_KEY.
 * AOT uses OP_TLS_SET_REG with the offset materialized as a patchable
 * constant; JIT uses OP_TLS_SET with the offset resolved immediately.
 */
1935 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1939 if (cfg->compile_aot) {
1940 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1941 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1943 ins->sreg2 = c->dreg;
1944 MONO_ADD_INS (cfg->cbb, ins);
1946 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1948 ins->inst_offset = mini_get_tls_offset (tls_key);
1949 MONO_ADD_INS (cfg->cbb, ins);
1956 * Emit IR to push the current LMF onto the LMF stack.
1959 emit_push_lmf (MonoCompile *cfg)
1962 * Emit IR to push the LMF:
1963 * lmf_addr = <lmf_addr from tls>
1964 * lmf->lmf_addr = lmf_addr
1965 * lmf->prev_lmf = *lmf_addr
1968 int lmf_reg, prev_lmf_reg;
1969 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS — link the stack frame's LMF
 * to the current one and install it with a single TLS store. */
1974 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1975 /* Load current lmf */
1976 lmf_ins = mono_get_lmf_intrinsic (cfg);
1978 MONO_ADD_INS (cfg->cbb, lmf_ins);
1979 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1980 lmf_reg = ins->dreg;
1981 /* Save previous_lmf */
1982 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1984 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1987 * Store lmf_addr in a variable, so it can be allocated to a global register.
1989 if (!cfg->lmf_addr_var)
1990 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Several strategies to obtain lmf_addr, from cheapest to the generic
 * mono_get_lmf_addr icall. NOTE(review): the #ifdef / else structure
 * selecting between them is partly elided in this view. */
1993 ins = mono_get_jit_tls_intrinsic (cfg);
1995 int jit_tls_dreg = ins->dreg;
1997 MONO_ADD_INS (cfg->cbb, ins);
1998 lmf_reg = alloc_preg (cfg);
1999 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2001 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2004 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2006 MONO_ADD_INS (cfg->cbb, lmf_ins);
2009 MonoInst *args [16], *jit_tls_ins, *ins;
2011 /* Inline mono_get_lmf_addr () */
2012 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2014 /* Load mono_jit_tls_id */
2015 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2016 /* call pthread_getspecific () */
2017 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2018 /* lmf_addr = &jit_tls->lmf */
2019 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2022 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2026 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
/* Link the frame's LMF into the list: lmf->previous_lmf = *lmf_addr;
 * *lmf_addr = lmf. */
2028 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2029 lmf_reg = ins->dreg;
2031 prev_lmf_reg = alloc_preg (cfg);
2032 /* Save previous_lmf */
2033 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2034 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2036 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2043 * Emit IR to pop the current LMF from the LMF stack.
2046 emit_pop_lmf (MonoCompile *cfg)
2048 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2054 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2055 lmf_reg = ins->dreg;
/* Fast path mirroring emit_push_lmf: restore previous_lmf straight into
 * the TLS LMF slot. */
2057 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2058 /* Load previous_lmf */
2059 prev_lmf_reg = alloc_preg (cfg);
2060 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2062 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2065 * Emit IR to pop the LMF:
2066 * *(lmf->lmf_addr) = lmf->prev_lmf
2068 /* This could be called before emit_push_lmf () */
2069 if (!cfg->lmf_addr_var)
2070 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2071 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2073 prev_lmf_reg = alloc_preg (cfg);
2074 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2075 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * When enter/leave profiling is enabled, emit a call to the profiler
 * icall FUNC passing the compiled method as the single argument.
 */
2080 emit_instrumentation_call (MonoCompile *cfg, void *func)
2082 MonoInst *iargs [1];
2085 * Avoid instrumenting inlined methods since it can
2086 * distort profiling results.
2088 if (cfg->method != cfg->current_method)
2091 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2092 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2093 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call opcode family (CALL / VOIDCALL / LCALL / FCALL / VCALL)
 * based on the return type, with the _REG variant for indirect calls
 * (calli) and _MEMBASE for virtual calls. NOTE(review): several case
 * labels and the byref early-return are elided in this view.
 */
2098 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2101 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
/* Resolve shared generic params and enum underlying types first. */
2104 type = mini_get_basic_type_from_generic (gsctx, type);
2105 type = mini_replace_type (type);
2106 switch (type->type) {
2107 case MONO_TYPE_VOID:
2108 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2111 case MONO_TYPE_BOOLEAN:
2114 case MONO_TYPE_CHAR:
2117 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2121 case MONO_TYPE_FNPTR:
2122 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2123 case MONO_TYPE_CLASS:
2124 case MONO_TYPE_STRING:
2125 case MONO_TYPE_OBJECT:
2126 case MONO_TYPE_SZARRAY:
2127 case MONO_TYPE_ARRAY:
2128 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2131 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2134 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2135 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying type. */
2136 if (type->data.klass->enumtype) {
2137 type = mono_class_enum_basetype (type->data.klass);
2140 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2141 case MONO_TYPE_TYPEDBYREF:
2142 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2143 case MONO_TYPE_GENERICINST:
2144 type = &type->data.generic_class->container_class->byval_arg;
2147 case MONO_TYPE_MVAR:
/* Gsharedvt: treated as a vtype call. */
2149 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2151 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2157 * target_type_is_incompatible:
2158 * @cfg: MonoCompile context
2160 * Check that the item @arg on the evaluation stack can be stored
2161 * in the target type (can be a local, or field, etc).
2162 * The cfg arg can be used to check if we need verification or just
2165 * Returns: non-0 value if arg can't be stored on a target.
2168 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2170 MonoType *simple_type;
2173 target = mini_replace_type (target);
/* Byref targets accept managed pointers (with matching pointee class)
 * and, per the lines elided below, apparently native pointers too. */
2174 if (target->byref) {
2175 /* FIXME: check that the pointed to types match */
2176 if (arg->type == STACK_MP)
2177 return arg->klass != mono_class_from_mono_type (target);
2178 if (arg->type == STACK_PTR)
/* Classify the (non-byref) target by its underlying type and check the
 * stack slot kind matches. NOTE(review): many return statements are
 * elided in this view. */
2183 simple_type = mono_type_get_underlying_type (target);
2184 switch (simple_type->type) {
2185 case MONO_TYPE_VOID:
2189 case MONO_TYPE_BOOLEAN:
2192 case MONO_TYPE_CHAR:
2195 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2199 /* STACK_MP is needed when setting pinned locals */
2200 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2205 case MONO_TYPE_FNPTR:
2207 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2208 * in native int. (#688008).
2210 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_CLASS:
2214 case MONO_TYPE_STRING:
2215 case MONO_TYPE_OBJECT:
2216 case MONO_TYPE_SZARRAY:
2217 case MONO_TYPE_ARRAY:
2218 if (arg->type != STACK_OBJ)
2220 /* FIXME: check type compatibility */
2224 if (arg->type != STACK_I8)
2229 if (arg->type != STACK_R8)
2232 case MONO_TYPE_VALUETYPE:
/* Valuetypes must match exactly by class. */
2233 if (arg->type != STACK_VTYPE)
2235 klass = mono_class_from_mono_type (simple_type);
2236 if (klass != arg->klass)
2239 case MONO_TYPE_TYPEDBYREF:
2240 if (arg->type != STACK_VTYPE)
2242 klass = mono_class_from_mono_type (simple_type);
2243 if (klass != arg->klass)
2246 case MONO_TYPE_GENERICINST:
2247 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2248 if (arg->type != STACK_VTYPE)
2250 klass = mono_class_from_mono_type (simple_type);
2251 if (klass != arg->klass)
2255 if (arg->type != STACK_OBJ)
2257 /* FIXME: check type compatibility */
2261 case MONO_TYPE_MVAR:
/* Type variables only appear here under generic sharing. */
2262 g_assert (cfg->generic_sharing_context);
2263 if (mini_type_var_is_vt (cfg, simple_type)) {
2264 if (arg->type != STACK_VTYPE)
2267 if (arg->type != STACK_OBJ)
2272 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2278 * Prepare arguments for passing to a function call.
2279 * Return a non-zero value if the arguments can't be passed to the given
2281 * The type checks are not yet complete and some conversions may need
2282 * casts on 32 or 64 bit architectures.
2284 * FIXME: implement this using target_type_is_incompatible ()
2287 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2289 MonoType *simple_type;
/* Implicit 'this' must be an object or (managed) pointer.
 * NOTE(review): the sig->hasthis guard line appears elided. */
2293 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check each formal parameter against the stack kind of its argument.
 * NOTE(review): many return statements are elided in this view. */
2297 for (i = 0; i < sig->param_count; ++i) {
2298 if (sig->params [i]->byref) {
2299 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2303 simple_type = sig->params [i];
2304 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2306 switch (simple_type->type) {
2307 case MONO_TYPE_VOID:
2312 case MONO_TYPE_BOOLEAN:
2315 case MONO_TYPE_CHAR:
2318 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2324 case MONO_TYPE_FNPTR:
2325 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2328 case MONO_TYPE_CLASS:
2329 case MONO_TYPE_STRING:
2330 case MONO_TYPE_OBJECT:
2331 case MONO_TYPE_SZARRAY:
2332 case MONO_TYPE_ARRAY:
2333 if (args [i]->type != STACK_OBJ)
2338 if (args [i]->type != STACK_I8)
2343 if (args [i]->type != STACK_R8)
2346 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying type. */
2347 if (simple_type->data.klass->enumtype) {
2348 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2351 if (args [i]->type != STACK_VTYPE)
2354 case MONO_TYPE_TYPEDBYREF:
2355 if (args [i]->type != STACK_VTYPE)
2358 case MONO_TYPE_GENERICINST:
2359 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2362 case MONO_TYPE_MVAR:
2364 if (args [i]->type != STACK_VTYPE)
2368 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * NOTE(review): the return statements for each case are elided in this view.
 */
2376 callvirt_to_call (int opcode)
2379 case OP_CALL_MEMBASE:
2381 case OP_VOIDCALL_MEMBASE:
2383 case OP_FCALL_MEMBASE:
2385 case OP_VCALL_MEMBASE:
2387 case OP_LCALL_MEMBASE:
2390 g_assert_not_reached ();
2396 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT argument (either the supplied IMT_ARG register or a
 * method constant) into a register and attach it to CALL. LLVM and the
 * MONO_ARCH_IMT_REG paths differ only in how the register is wired into
 * the call; otherwise the arch back end handles it.
 */
2398 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2402 if (COMPILE_LLVM (cfg)) {
2403 method_reg = alloc_preg (cfg);
/* NOTE(review): the if (imt_arg) guard line appears elided here. */
2406 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2407 } else if (cfg->compile_aot) {
2408 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2411 MONO_INST_NEW (cfg, ins, OP_PCONST);
2412 ins->inst_p0 = method;
2413 ins->dreg = method_reg;
2414 MONO_ADD_INS (cfg->cbb, ins);
2418 call->imt_arg_reg = method_reg;
2420 #ifdef MONO_ARCH_IMT_REG
2421 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2423 /* Need this to keep the IMT arg alive */
2424 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2429 #ifdef MONO_ARCH_IMT_REG
2430 method_reg = alloc_preg (cfg);
2433 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2434 } else if (cfg->compile_aot) {
2435 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2438 MONO_INST_NEW (cfg, ins, OP_PCONST);
2439 ins->inst_p0 = method;
2440 ins->dreg = method_reg;
2441 MONO_ADD_INS (cfg->cbb, ins);
2444 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No dedicated IMT register on this arch: let the back end place it. */
2446 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP and fill in its target; the remaining
 * fields (ip/type assignments) are elided in this view.
 */
2450 static MonoJumpInfo *
2451 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2453 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2457 ji->data.target = target;
/* Context-used bits for KLASS, but only when generic sharing is active
 * for this compile (returns the elided fallback — presumably 0 — otherwise). */
2463 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2465 if (cfg->generic_sharing_context)
2466 return mono_class_check_context_used (klass);
/* Same as mini_class_check_context_used, but for a method. */
2472 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2474 if (cfg->generic_sharing_context)
2475 return mono_method_check_context_used (method);
2481 * check_method_sharing:
2483 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2486 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2488 gboolean pass_vtable = FALSE;
2489 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods on generic classes may need the vtable
 * as a hidden argument if the target can be generic-shared. */
2491 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2492 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2493 gboolean sharable = FALSE;
2495 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2498 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2499 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2500 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2502 sharable = sharing_enabled && context_sharable;
2506 * Pass vtable iff target method might
2507 * be shared, which means that sharing
2508 * is enabled for its class and its
2509 * context is sharable (and it's not a
2512 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst set) need an mrgctx instead of the
 * vtable — the two are mutually exclusive (see the assert). */
2516 if (mini_method_get_context (cmethod) &&
2517 mini_method_get_context (cmethod)->method_inst) {
2518 g_assert (!pass_vtable);
2520 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2523 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2524 MonoGenericContext *context = mini_method_get_context (cmethod);
2525 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2527 if (sharing_enabled && context_sharable)
2529 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
/* Report results through the optional out parameters. */
2534 if (out_pass_vtable)
2535 *out_pass_vtable = pass_vtable;
2536 if (out_pass_mrgctx)
2537 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Create and configure a MonoCallInst for a call with signature SIG
 *   and arguments ARGS.  The calli/virtual/tail/rgctx/unbox_trampoline
 *   flags select the call opcode and ABI details.  Handles valuetype
 *   returns (via vret_var or OP_OUTARG_VTRETADDR) and the soft-float
 *   r8->r4 argument conversion.  NOTE(review): this excerpt elides a
 *   number of original lines (declarations, braces, the final return).
 */
2540 inline static MonoCallInst *
2541 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2542 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2546 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls: notify the profiler of method leave, then use OP_TAILCALL. */
2551 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2553 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2555 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2558 call->signature = sig;
2559 call->rgctx_reg = rgctx;
2560 sig_ret = mini_replace_type (sig->ret);
2562 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr ... */
2565 if (mini_type_is_vtype (cfg, sig_ret)) {
2566 call->vret_var = cfg->vret_addr;
2567 //g_assert_not_reached ();
/* ... or allocate a temp and emit OP_OUTARG_VTRETADDR pointing at it. */
2569 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2570 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2573 temp->backend.is_pinvoke = sig->pinvoke;
2576 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2577 * address of return value to increase optimization opportunities.
2578 * Before vtype decomposition, the dreg of the call ins itself represents the
2579 * fact the call modifies the return value. After decomposition, the call will
2580 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2581 * will be transformed into an LDADDR.
2583 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2584 loada->dreg = alloc_preg (cfg);
2585 loada->inst_p0 = temp;
2586 /* We reference the call too since call->dreg could change during optimization */
2587 loada->inst_p1 = call;
2588 MONO_ADD_INS (cfg->cbb, loada);
2590 call->inst.dreg = temp->dreg;
2592 call->vret_var = loada;
2593 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2594 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2596 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2597 if (COMPILE_SOFT_FLOAT (cfg)) {
2599 * If the call has a float argument, we would need to do an r8->r4 conversion using
2600 * an icall, but that cannot be done during the call sequence since it would clobber
2601 * the call registers + the stack. So we do it before emitting the call.
2603 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2605 MonoInst *in = call->args [i];
2607 if (i >= sig->hasthis)
2608 t = sig->params [i - sig->hasthis];
2610 t = &mono_defaults.int_class->byval_arg;
2611 t = mono_type_get_underlying_type (t);
2613 if (!t->byref && t->type == MONO_TYPE_R4) {
2614 MonoInst *iargs [1];
2618 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2620 /* The result will be in an int vreg */
2621 call->args [i] = conv;
2627 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend (LLVM or native) lower the argument passing. */
2630 if (COMPILE_LLVM (cfg))
2631 mono_llvm_emit_call (cfg, call);
2633 mono_arch_emit_call (cfg, call);
2635 mono_arch_emit_call (cfg, call);
2638 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2639 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument to CALL: on architectures with a dedicated
 *   RGCTX register, pass RGCTX_REG in MONO_ARCH_RGCTX_REG; otherwise
 *   record it in call->rgctx_arg_reg (non-register path partly elided
 *   in this excerpt).
 */
2645 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2647 #ifdef MONO_ARCH_RGCTX_REG
2648 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2649 cfg->uses_rgctx_reg = TRUE;
2650 call->rgctx_reg = TRUE;
2652 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG.  Optionally
 *   passes an IMT argument and an rgctx argument.  For pinvoke wrappers
 *   with check_pinvoke_callconv enabled, brackets the call with
 *   OP_GET_SP/OP_SET_SP to detect stack imbalance caused by a wrong
 *   calling convention and throws ExecutionEngineException.
 *   NOTE(review): some original lines are elided in this excerpt.
 */
2659 inline static MonoInst*
2660 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2665 gboolean check_sp = FALSE;
2667 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2668 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2670 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh preg so the call can consume it. */
2675 rgctx_reg = mono_alloc_preg (cfg);
2676 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Save the stack pointer before the call for the imbalance check. */
2680 if (!cfg->stack_inbalance_var)
2681 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2683 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2684 ins->dreg = cfg->stack_inbalance_var->dreg;
2685 MONO_ADD_INS (cfg->cbb, ins);
2688 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2690 call->inst.sreg1 = addr->dreg;
2693 emit_imt_argument (cfg, call, NULL, imt_arg);
2695 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* After the call, re-read SP and compare it against the saved value. */
2700 sp_reg = mono_alloc_preg (cfg);
2702 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2704 MONO_ADD_INS (cfg->cbb, ins);
2706 /* Restore the stack so we don't crash when throwing the exception */
2707 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2708 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2709 MONO_ADD_INS (cfg->cbb, ins);
2711 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2712 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2716 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2718 return (MonoInst*)call;
2722 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2725 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2727 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a managed call to METHOD with arguments ARGS.  THIS != NULL
 *   selects virtual dispatch.  Handles: remoting proxies, string ctors,
 *   delegate Invoke fast path, devirtualization of final/non-virtual
 *   methods, interface dispatch through the IMT, and generic virtual
 *   methods.  Optionally threads through imt_arg/rgctx_arg.
 *   NOTE(review): this excerpt elides several original lines
 *   (declarations, braces, some branches); comments below describe only
 *   the visible code.
 */
2730 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2731 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2733 #ifndef DISABLE_REMOTING
2734 gboolean might_be_remote = FALSE;
2736 gboolean virtual = this != NULL;
2737 gboolean enable_for_aot = TRUE;
2741 gboolean need_unbox_trampoline;
2744 sig = mono_method_signature (method);
2747 rgctx_reg = mono_alloc_preg (cfg);
2748 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2751 if (method->string_ctor) {
2752 /* Create the real signature */
2753 /* FIXME: Cache these */
2754 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2755 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2760 context_used = mini_method_check_context_used (cfg, method);
2762 #ifndef DISABLE_REMOTING
/* A call on a MarshalByRef (or object) receiver may go through a proxy. */
2763 might_be_remote = this && sig->hasthis &&
2764 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2765 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2767 if (might_be_remote && context_used) {
2770 g_assert (cfg->generic_sharing_context);
2772 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2774 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2778 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2780 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2782 #ifndef DISABLE_REMOTING
2783 if (might_be_remote)
2784 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2787 call->method = method;
2788 call->inst.flags |= MONO_INST_HAS_METHOD;
2789 call->inst.inst_left = this;
2790 call->tail_call = tail;
2793 int vtable_reg, slot_reg, this_reg;
2796 this_reg = this->dreg;
/* Fast path: delegate Invoke calls through delegate->invoke_impl. */
2798 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2799 MonoInst *dummy_use;
2801 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2803 /* Make a call to delegate->invoke_impl */
2804 call->inst.inst_basereg = this_reg;
2805 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2806 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2808 /* We must emit a dummy use here because the delegate trampoline will
2809 replace the 'this' argument with the delegate target making this activation
2810 no longer a root for the delegate.
2811 This is an issue for delegates that target collectible code such as dynamic
2812 methods of GC'able assemblies.
2814 For a test case look into #667921.
2816 FIXME: a dummy use is not the best way to do it as the local register allocator
2817 will put it on a caller save register and spil it around the call.
2818 Ideally, we would either put it on a callee save register or only do the store part.
2820 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2822 return (MonoInst*)call;
/* Devirtualize non-virtual or final methods into a direct call. */
2825 if ((!cfg->compile_aot || enable_for_aot) &&
2826 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2827 (MONO_METHOD_IS_FINAL (method) &&
2828 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2829 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2831 * the method is not virtual, we just need to ensure this is not null
2832 * and then we can call the method directly.
2834 #ifndef DISABLE_REMOTING
2835 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2837 * The check above ensures method is not gshared, this is needed since
2838 * gshared methods can't have wrappers.
2840 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2844 if (!method->string_ctor)
2845 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2847 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2848 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2850 * the method is virtual, but we can statically dispatch since either
2851 * it's class or the method itself are sealed.
2852 * But first we need to ensure it's not a null reference.
2854 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2856 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable, then pick a slot. */
2858 vtable_reg = alloc_preg (cfg);
2859 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2860 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: negative offset into the IMT table before the vtable. */
2863 guint32 imt_slot = mono_method_get_imt_slot (method);
2864 emit_imt_argument (cfg, call, call->method, imt_arg);
2865 slot_reg = vtable_reg;
2866 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2868 if (slot_reg == -1) {
2869 slot_reg = alloc_preg (cfg);
2870 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2871 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: positive offset into the vtable slot array. */
2874 slot_reg = vtable_reg;
2875 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2876 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2878 g_assert (mono_method_signature (method)->generic_param_count);
2879 emit_imt_argument (cfg, call, call->method, imt_arg);
2883 call->inst.sreg1 = slot_reg;
2884 call->inst.inst_offset = offset;
2885 call->virtual = TRUE;
2889 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2892 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2894 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: non-tail call to METHOD using its own
 *   signature, with no IMT or rgctx argument.
 */
2898 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2900 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   NOTE(review): lines storing FUNC into the call (fptr) are elided in
 *   this excerpt.
 */
2904 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2911 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2914 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2916 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the registered JIT icall for FUNC and emit a native call to
 *   its wrapper using the icall's declared signature.
 */
2920 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2922 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2926 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2930 * mono_emit_abs_call:
2932 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the call address; the ABS patch
 * resolver recognizes it via the cfg->abs_patches table and resolves
 * the real target at patch time.
 */
2934 inline static MonoInst*
2935 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2936 MonoMethodSignature *sig, MonoInst **args)
2938 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2942 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2945 if (cfg->abs_patches == NULL)
2946 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2947 g_hash_table_insert (cfg->abs_patches, ji, ji);
2948 ins = mono_emit_native_call (cfg, ji, sig, args);
2949 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Sign/zero-extend small integer return values of pinvoke (or LLVM)
 *   calls, since native code might leave the upper bits of the return
 *   register uninitialized.  Returns the (possibly widened) result ins;
 *   the surrounding returns are partly elided in this excerpt.
 */
2954 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2956 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2957 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2961 * Native code might return non register sized integers
2962 * without initializing the upper bits.
/* Map the load opcode of the return type to the matching widen conv. */
2964 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2965 case OP_LOADI1_MEMBASE:
2966 widen_op = OP_ICONV_TO_I1;
2968 case OP_LOADU1_MEMBASE:
2969 widen_op = OP_ICONV_TO_U1;
2971 case OP_LOADI2_MEMBASE:
2972 widen_op = OP_ICONV_TO_I2;
2974 case OP_LOADU2_MEMBASE:
2975 widen_op = OP_ICONV_TO_U2;
2981 if (widen_op != -1) {
2982 int dreg = alloc_preg (cfg);
2985 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2986 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and lazily cache) the corlib-internal String.memcpy(3)
 *   helper; aborts if the installed corlib does not provide it.
 */
2996 get_memcpy_method (void)
2998 static MonoMethod *memcpy_method = NULL;
2999 if (!memcpy_method) {
3000 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3002 g_error ("Old corlib found. Install a new one");
3004 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively scan KLASS's instance fields and set a bit in
 *   *wb_bitmap (one bit per pointer-sized word, starting at OFFSET)
 *   for every reference field, descending into embedded valuetypes
 *   that contain references.  Static fields are skipped.
 */
3008 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3010 MonoClassField *field;
3011 gpointer iter = NULL;
3013 while ((field = mono_class_get_fields (klass, &iter))) {
3016 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
3018 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3019 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3020 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3021 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3023 MonoClass *field_class = mono_class_from_mono_type (field->type);
3024 if (field_class->has_references)
3025 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for the store of VALUE through PTR.
 *   Prefers, in order: the arch-specific OP_CARD_TABLE_WBARRIER opcode,
 *   inline card-table marking (shift + optional mask + store of 1), and
 *   finally a call to the generic GC write-barrier method.  A dummy use
 *   of VALUE keeps it alive across the barrier.  No-op when
 *   gen_write_barriers is off.  Some original lines are elided in this
 *   excerpt.
 */
3031 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3033 int card_table_shift_bits;
3034 gpointer card_table_mask;
3036 MonoInst *dummy_use;
3037 int nursery_shift_bits;
3038 size_t nursery_size;
3039 gboolean has_card_table_wb = FALSE;
3041 if (!cfg->gen_write_barriers)
3044 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3046 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3048 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3049 has_card_table_wb = TRUE;
3052 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3055 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3056 wbarrier->sreg1 = ptr->dreg;
3057 wbarrier->sreg2 = value->dreg;
3058 MONO_ADD_INS (cfg->cbb, wbarrier);
3059 } else if (card_table) {
3060 int offset_reg = alloc_preg (cfg);
3061 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked. */
3064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3065 if (card_table_mask)
3066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3068 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3069 * IMM's larger than 32bits.
3071 if (cfg->compile_aot) {
3072 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3074 MONO_INST_NEW (cfg, ins, OP_PCONST);
3075 ins->inst_p0 = card_table;
3076 ins->dreg = card_reg;
3077 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: *(card_table + index) = 1. */
3080 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3081 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3083 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3084 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3087 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Inline a write-barrier-aware copy of a valuetype of type KLASS
 *   (SIZE bytes, ALIGN alignment) from iargs[1] to iargs[0].  For small
 *   copies (<= 5 words) the stores are unrolled with a barrier emitted
 *   for each reference word; larger copies fall back to the
 *   mono_gc_wbarrier_value_copy_bitmap icall.  Returns (presumably)
 *   whether the copy was handled inline -- the return statements and
 *   several braces are elided in this excerpt.
 */
3091 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3093 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3094 unsigned need_wb = 0;
3099 /*types with references can't have alignment smaller than sizeof(void*) */
3100 if (align < SIZEOF_VOID_P)
3103 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3104 if (size > 32 * SIZEOF_VOID_P)
3107 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3109 /* We don't unroll more than 5 stores to avoid code bloat. */
3110 if (size > 5 * SIZEOF_VOID_P) {
3111 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3112 size += (SIZEOF_VOID_P - 1);
3113 size &= ~(SIZEOF_VOID_P - 1);
3115 EMIT_NEW_ICONST (cfg, iargs [2], size);
3116 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3117 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3121 destreg = iargs [0]->dreg;
3122 srcreg = iargs [1]->dreg;
3125 dest_ptr_reg = alloc_preg (cfg);
3126 tmp_reg = alloc_preg (cfg);
3129 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled word-by-word copy with a barrier per reference slot. */
3131 while (size >= SIZEOF_VOID_P) {
3132 MonoInst *load_inst;
3133 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3134 load_inst->dreg = tmp_reg;
3135 load_inst->inst_basereg = srcreg;
3136 load_inst->inst_offset = offset;
3137 MONO_ADD_INS (cfg->cbb, load_inst);
3139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3142 emit_write_barrier (cfg, iargs [0], load_inst);
3144 offset += SIZEOF_VOID_P;
3145 size -= SIZEOF_VOID_P;
3148 /*tmp += sizeof (void*)*/
3149 if (size >= SIZEOF_VOID_P) {
3150 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3151 MONO_ADD_INS (cfg->cbb, iargs [0]);
3155 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-word tail with 4/2/1-byte moves (no barriers needed). */
3157 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3158 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3164 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3165 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3171 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3172 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3181 * Emit code to copy a valuetype of type @klass whose address is stored in
3182 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Strategy: gsharedvt klasses fetch size/memcpy helpers from the rgctx;
 * types with references use the GC-aware value-copy paths; otherwise a
 * small fixed-size copy is inlined, falling back to the corlib memcpy
 * helper.  Several original lines (braces, some assignments) are elided
 * in this excerpt.
 */
3185 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3187 MonoInst *iargs [4];
3188 int context_used, n;
3190 MonoMethod *memcpy_method;
3191 MonoInst *size_ins = NULL;
3192 MonoInst *memcpy_ins = NULL;
3196 * This check breaks with spilled vars... need to handle it during verification anyway.
3197 * g_assert (klass && klass == src->klass && klass == dest->klass);
3200 if (mini_is_gsharedvt_klass (cfg, klass)) {
3202 context_used = mini_class_check_context_used (cfg, klass);
3203 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3204 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3208 n = mono_class_native_size (klass, &align);
3210 n = mono_class_value_size (klass, &align);
3212 /* if native is true there should be no references in the struct */
3213 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3214 /* Avoid barriers when storing to the stack */
3215 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3216 (dest->opcode == OP_LDADDR))) {
3222 context_used = mini_class_check_context_used (cfg, klass);
3224 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3225 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3227 } else if (context_used) {
3228 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3230 if (cfg->compile_aot) {
3231 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3233 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3234 mono_class_compute_gc_descriptor (klass);
3239 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3241 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references: plain memcpy, inlined when small and size is static. */
3246 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3247 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3248 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3253 iargs [2] = size_ins;
3255 EMIT_NEW_ICONST (cfg, iargs [2], n);
3257 memcpy_method = get_memcpy_method ();
3259 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3261 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and lazily cache) the corlib-internal String.memset(3)
 *   helper; aborts if the installed corlib does not provide it.
 */
3266 get_memset_method (void)
3268 static MonoMethod *memset_method = NULL;
3269 if (!memset_method) {
3270 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3272 g_error ("Old corlib found. Install a new one");
3274 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize a valuetype of type KLASS at the
 *   address in DEST.  gsharedvt klasses call a bzero helper via an
 *   rgctx-provided function pointer and size; small statically-sized
 *   types are inlined with mini_emit_memset; larger ones call the
 *   corlib memset helper.  Some original lines are elided.
 */
3278 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3280 MonoInst *iargs [3];
3281 int n, context_used;
3283 MonoMethod *memset_method;
3284 MonoInst *size_ins = NULL;
3285 MonoInst *bzero_ins = NULL;
3286 static MonoMethod *bzero_method;
3288 /* FIXME: Optimize this for the case when dest is an LDADDR */
3290 mono_class_init (klass);
3291 if (mini_is_gsharedvt_klass (cfg, klass)) {
3292 context_used = mini_class_check_context_used (cfg, klass);
3293 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3294 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3296 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3297 g_assert (bzero_method);
3299 iargs [1] = size_ins;
3300 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3304 n = mono_class_value_size (klass, &align);
3306 if (n <= sizeof (gpointer) * 5) {
3307 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3310 memset_method = get_memset_method ();
3312 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3313 EMIT_NEW_ICONST (cfg, iargs [2], n);
3314 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that loads the runtime generic context for METHOD.
 *   Depending on CONTEXT_USED and the method kind the rgctx comes from:
 *   the mrgctx variable (generic methods), the vtable variable (static
 *   or valuetype methods) possibly dereferenced from the mrgctx, or the
 *   'this' argument's vtable.  Several original lines (returns, braces)
 *   are elided in this excerpt.
 */
3319 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3321 MonoInst *this = NULL;
3323 g_assert (cfg->generic_sharing_context);
3325 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3326 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3327 !method->klass->valuetype)
3328 EMIT_NEW_ARGLOAD (cfg, this, 0);
3330 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3331 MonoInst *mrgctx_loc, *mrgctx_var;
3334 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3336 mrgctx_loc = mono_get_vtable_var (cfg);
3337 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3340 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3341 MonoInst *vtable_loc, *vtable_var;
3345 vtable_loc = mono_get_vtable_var (cfg);
3346 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3348 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3349 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an mrgctx; load its class_vtable field. */
3352 vtable_reg = alloc_preg (cfg);
3353 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3354 vtable_var->type = STACK_PTR;
/* Instance methods: read the vtable out of the 'this' object. */
3362 vtable_reg = alloc_preg (cfg);
3363 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate an rgctx-entry patch descriptor from mempool MP, wrapping
 *   an inner MonoJumpInfo of PATCH_TYPE/PATCH_DATA; the return
 *   statement is elided in this excerpt.
 */
3368 static MonoJumpInfoRgctxEntry *
3369 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3371 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3372 res->method = method;
3373 res->in_mrgctx = in_mrgctx;
3374 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3375 res->data->type = patch_type;
3376 res->data->data.target = patch_data;
3377 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY
 *   at runtime, passing the rgctx instance as the single argument.
 */
3382 static inline MonoInst*
3383 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3385 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR that fetches the RGCTX_TYPE info slot for KLASS from the
 *   current method's runtime generic context.
 */
3389 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3390 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3392 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3393 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3395 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *   Emit IR that fetches the RGCTX_TYPE info slot for signature SIG
 *   from the current method's runtime generic context.
 */
3399 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3400 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3402 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3403 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3405 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR that fetches the rgctx info for a gsharedvt call described
 *   by (SIG, CMETHOD); the call-info descriptor is mempool-allocated.
 */
3409 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3410 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3412 MonoJumpInfoGSharedVtCall *call_info;
3413 MonoJumpInfoRgctxEntry *entry;
3416 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3417 call_info->sig = sig;
3418 call_info->method = cmethod;
3420 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3421 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3423 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *   Emit IR that fetches the per-method gsharedvt info structure for
 *   CMETHOD from the runtime generic context.
 */
3428 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3429 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3431 MonoJumpInfoRgctxEntry *entry;
3434 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3435 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3437 return emit_rgctx_fetch (cfg, rgctx, entry);
3441 * emit_get_rgctx_method:
3443 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3444 * normal constants, else emit a load from the rgctx.
/* NOTE(review): the returns of the constant branches are elided here. */
3447 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3448 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3450 if (!context_used) {
3453 switch (rgctx_type) {
3454 case MONO_RGCTX_INFO_METHOD:
3455 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3457 case MONO_RGCTX_INFO_METHOD_RGCTX:
3458 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3461 g_assert_not_reached ();
3464 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3465 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3467 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR that fetches the RGCTX_TYPE info slot for FIELD from the
 *   current method's runtime generic context.
 */
3472 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3473 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3475 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3476 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3478 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *   Find or allocate the slot index in cfg->gsharedvt_info->entries for
 *   (DATA, RGCTX_TYPE).  Existing slots are reused except for
 *   MONO_RGCTX_INFO_LOCAL_OFFSET entries; the array grows by doubling
 *   (starting at 16).  Return statements are elided in this excerpt.
 */
3482 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3484 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3485 MonoRuntimeGenericContextInfoTemplate *template;
3490 for (i = 0; i < info->num_entries; ++i) {
3491 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3493 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3497 if (info->num_entries == info->count_entries) {
3498 MonoRuntimeGenericContextInfoTemplate *new_entries;
3499 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3501 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3503 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3504 info->entries = new_entries;
3505 info->count_entries = new_count_entries;
3508 idx = info->num_entries;
3509 template = &info->entries [idx];
3510 template->info_type = rgctx_type;
3511 template->data = data;
3513 info->num_entries ++;
3519 * emit_get_gsharedvt_info:
3521 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/* Allocates/reuses a slot for (DATA, RGCTX_TYPE) and emits a load of
 * gsharedvt_info_var->entries[idx]; the return is elided here. */
3524 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3529 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3530 /* Load info->entries [idx] */
3531 dreg = alloc_preg (cfg);
3532 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *   Class-keyed convenience wrapper: look up the gsharedvt info slot
 *   using KLASS's byval type as the key.
 */
3538 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3540 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3544 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *   Emit a call to the generic class-init trampoline for KLASS,
 *   passing the vtable either from the rgctx (shared code) or as a
 *   constant; on architectures with a vtable register the argument is
 *   pinned to MONO_ARCH_VTABLE_REG.  Some lines are elided here.
 */
3547 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3549 MonoInst *vtable_arg;
3553 context_used = mini_class_check_context_used (cfg, klass);
3556 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3557 klass, MONO_RGCTX_INFO_VTABLE);
3559 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3563 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3566 if (COMPILE_LLVM (cfg))
3567 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3569 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3570 #ifdef MONO_ARCH_VTABLE_REG
3571 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3572 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *   Emit a debugger sequence point at IL offset (ip - header->code),
 *   but only when sequence points are enabled and METHOD is the method
 *   actually being compiled (i.e. not while inlining).
 */
3579 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3583 if (cfg->gen_seq_points && cfg->method == method) {
3584 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3586 ins->flags |= MONO_INST_NONEMPTY_STACK;
3587 MONO_ADD_INS (cfg->cbb, ins);
/* When --debug=casts is enabled, record the source class (read from the object's
 * vtable) and the target @klass into the thread's MonoJitTlsData
 * (class_cast_from / class_cast_to), so a failing cast can produce a detailed
 * error message.  If @null_check, a null @obj_reg skips the recording.
 * On return *out_bblock (if non-NULL) is updated to the current bblock. */
3592 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3594 if (mini_get_debug_options ()->better_cast_details) {
3595 int vtable_reg = alloc_preg (cfg);
3596 int klass_reg = alloc_preg (cfg);
3597 MonoBasicBlock *is_null_bb = NULL;
3599 int to_klass_reg, context_used;
3602 NEW_BBLOCK (cfg, is_null_bb);
/* Skip the bookkeeping entirely for null objects. */
3604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3608 tls_get = mono_get_jit_tls_intrinsic (cfg);
3610 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3614 MONO_ADD_INS (cfg->cbb, tls_get);
/* Load obj->vtable->klass and store it as the "cast from" class in JIT TLS. */
3615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3618 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* The target class must come from the RGCTX in shared code. */
3620 context_used = mini_class_check_context_used (cfg, klass);
3622 MonoInst *class_ins;
3624 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3625 to_klass_reg = class_ins->dreg;
3627 to_klass_reg = alloc_preg (cfg);
3628 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3630 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3633 MONO_START_BB (cfg, is_null_bb);
3635 *out_bblock = cfg->cbb;
/* Clear the cast-details state saved by save_cast_details.  Only the
 * class_cast_from field needs to be zeroed to mark the record invalid. */
3641 reset_cast_details (MonoCompile *cfg)
3643 /* Reset the variables holding the cast details */
3644 if (mini_get_debug_options ()->better_cast_details) {
3645 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3647 MONO_ADD_INS (cfg->cbb, tls_get);
3648 /* It is enough to reset the from field */
3649 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3654 * On return the caller must check @array_class for load errors
/* Emit a runtime check that @obj's exact type is @array_class, throwing
 * ArrayTypeMismatchException otherwise (used for covariant array stores).
 * The comparison strategy depends on the compilation mode:
 *   MONO_OPT_SHARED  - compare obj->vtable->klass against the class;
 *   shared generics  - compare obj->vtable against an RGCTX-fetched vtable;
 *   default          - compare obj->vtable against a (const) vtable. */
3657 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3659 int vtable_reg = alloc_preg (cfg);
3662 context_used = mini_class_check_context_used (cfg, array_class);
3664 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on obj. */
3666 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3668 if (cfg->opt & MONO_OPT_SHARED) {
3669 int class_reg = alloc_preg (cfg);
3670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3671 if (cfg->compile_aot) {
/* AOT cannot embed raw class pointers; materialize the class via a patchable const. */
3672 int klass_reg = alloc_preg (cfg);
3673 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3678 } else if (context_used) {
3679 MonoInst *vtable_ins;
3681 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3682 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3684 if (cfg->compile_aot) {
/* NOTE(review): the error path for a NULL vtable is on an elided line. */
3688 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3690 vt_reg = alloc_preg (cfg);
3691 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3692 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3695 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* All strategies converge here: unequal comparison result throws. */
3701 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3703 reset_cast_details (cfg);
3707 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3708 * generic code is generated.
/* Emits a call to the class's Nullable Unbox(object) helper method.  In shared
 * code the method address comes from the RGCTX and the call is an indirect
 * calli; otherwise a direct call is emitted, passing the vtable when
 * check_method_sharing says the method needs it. */
3711 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3713 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3716 MonoInst *rgctx, *addr;
3718 /* FIXME: What if the class is shared? We might not
3719 have to get the address of the method from the
/* Shared path: fetch the concrete method code address through the RGCTX. */
3721 addr = emit_get_rgctx_method (cfg, context_used, method,
3722 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3724 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3726 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3728 gboolean pass_vtable, pass_mrgctx;
3729 MonoInst *rgctx_arg = NULL;
3731 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3732 g_assert (!pass_mrgctx);
3735 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3738 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3741 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Emit the unbox of a (non-Nullable) valuetype: check that the boxed object's
 * element class matches @klass (throwing InvalidCastException otherwise) and
 * return the address of the unboxed data, i.e. obj + sizeof (MonoObject). */
3746 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3750 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3751 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3752 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3753 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3755 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3756 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3757 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3759 /* FIXME: generics */
3760 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a valuetype. */
3763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3764 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3770 MonoInst *element_class;
3772 /* This assertion is from the unboxcast insn */
3773 g_assert (klass->rank == 0);
/* Shared path: compare against the RGCTX-resolved element class. */
3775 element_class = emit_get_rgctx_klass (cfg, context_used,
3776 klass->element_class, MONO_RGCTX_INFO_KLASS);
3778 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3779 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: inline class check with cast-details bookkeeping. */
3781 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3782 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3783 reset_cast_details (cfg);
/* The unboxed payload starts right after the MonoObject header. */
3786 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3787 MONO_ADD_INS (cfg->cbb, add);
3788 add->type = STACK_MP;
/* Unbox in gsharedvt code, where the concrete type of @klass is only known at
 * run time.  Branches on the runtime CLASS_BOX_TYPE info:
 *   1 (ref)      - store the reference into a temporary and yield its address;
 *   2 (nullable) - call the runtime-resolved Nullable unbox helper;
 *   otherwise    - valuetype: yield obj + sizeof (MonoObject).
 * All paths join at end_bb and the value is loaded from addr_reg.
 * On return *out_cbb holds the current bblock. */
3795 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3797 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3798 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3802 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3808 args [1] = klass_inst;
/* Dynamic cast check (also handles null). */
3811 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3813 NEW_BBLOCK (cfg, is_ref_bb);
3814 NEW_BBLOCK (cfg, is_nullable_bb);
3815 NEW_BBLOCK (cfg, end_bb);
/* 1 = reference type, 2 = Nullable<T> (per MONO_RGCTX_INFO_CLASS_BOX_TYPE). */
3816 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3818 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3820 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3821 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3823 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3824 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Valuetype fallthrough: data lives right after the object header. */
3828 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3829 MONO_ADD_INS (cfg->cbb, addr);
3831 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3834 MONO_START_BB (cfg, is_ref_bb);
3836 /* Save the ref to a temporary */
3837 dreg = alloc_ireg (cfg);
3838 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3839 addr->dreg = addr_reg;
3840 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3841 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3844 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the runtime-resolved unbox helper through a
 * hand-built signature (the exact method cannot be constructed at JIT time). */
3847 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3848 MonoInst *unbox_call;
3849 MonoMethodSignature *unbox_sig;
3852 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3854 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3855 unbox_sig->ret = &klass->byval_arg;
3856 unbox_sig->param_count = 1;
3857 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3858 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3860 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3861 addr->dreg = addr_reg;
3864 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3867 MONO_START_BB (cfg, end_bb);
/* Load the result through the common address register. */
3870 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3872 *out_cbb = cfg->cbb;
3878 * Returns NULL and set the cfg exception on error.
/* Emit the allocation of an instance of @klass (optionally for a box).
 * Chooses among: RGCTX-driven allocation for shared generic code, the
 * domain-shared mono_object_new path, an mscorlib-specialized AOT helper,
 * a GC managed allocator, or the class's allocation function. */
3881 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3883 MonoInst *iargs [2];
3889 MonoInst *iargs [2];
/* gsharedvt classes have a runtime-dependent instance size. */
3890 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3892 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* Shared-code path: fetch the class (MONO_OPT_SHARED) or vtable via RGCTX. */
3894 if (cfg->opt & MONO_OPT_SHARED)
3895 rgctx_info = MONO_RGCTX_INFO_KLASS;
3897 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3898 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3900 if (cfg->opt & MONO_OPT_SHARED) {
3901 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3903 alloc_ftn = mono_object_new;
3906 alloc_ftn = mono_object_new_specific;
3909 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
/* The managed allocator takes an aligned size when the size is static. */
3910 if (known_instance_size)
3911 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
3912 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3915 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3918 if (cfg->opt & MONO_OPT_SHARED) {
3919 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3920 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3922 alloc_ftn = mono_object_new;
3923 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3924 /* This happens often in argument checking code, eg. throw new FooException... */
3925 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3926 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3927 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3929 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3930 MonoMethod *managed_alloc = NULL;
/* Class failed to load: record a TypeLoadException on the cfg and bail. */
3934 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3935 cfg->exception_ptr = klass;
3939 #ifndef MONO_CROSS_COMPILE
3940 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
3943 if (managed_alloc) {
3944 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3945 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (klass->instance_size));
3946 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3948 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions take the instance size in words ("lw") first. */
3950 guint32 lw = vtable->klass->instance_size;
3951 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3952 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3953 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3956 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3960 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3964 * Returns NULL and set the cfg exception on error.
/* Emit the boxing of @val (of type @klass).  Three major cases:
 *   Nullable<T>          - call the class's Box(T) helper (via RGCTX in shared code);
 *   gsharedvt class      - branch at run time on CLASS_BOX_TYPE (ref / nullable / vtype);
 *   plain valuetype      - allocate and store the value after the object header.
 * On return *out_cbb holds the current bblock. */
3967 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3969 MonoInst *alloc, *ins;
3971 *out_cbb = cfg->cbb;
3973 if (mono_class_is_nullable (klass)) {
3974 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3977 /* FIXME: What if the class is shared? We might not
3978 have to get the method address from the RGCTX. */
3979 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3980 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3981 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3983 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3985 gboolean pass_vtable, pass_mrgctx;
3986 MonoInst *rgctx_arg = NULL;
3988 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3989 g_assert (!pass_mrgctx);
3992 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3995 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3998 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
4002 if (mini_is_gsharedvt_klass (cfg, klass)) {
4003 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4004 MonoInst *res, *is_ref, *src_var, *addr;
4007 dreg = alloc_ireg (cfg);
4009 NEW_BBLOCK (cfg, is_ref_bb);
4010 NEW_BBLOCK (cfg, is_nullable_bb);
4011 NEW_BBLOCK (cfg, end_bb);
/* 1 = reference type, 2 = Nullable<T> (per MONO_RGCTX_INFO_CLASS_BOX_TYPE). */
4012 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4013 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4014 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4016 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4017 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Valuetype fallthrough: allocate a box and copy the value into it. */
4020 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4023 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4024 ins->opcode = OP_STOREV_MEMBASE;
4026 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4027 res->type = STACK_OBJ;
4029 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4032 MONO_START_BB (cfg, is_ref_bb);
4033 addr_reg = alloc_ireg (cfg);
4035 /* val is a vtype, so has to load the value manually */
4036 src_var = get_vreg_to_inst (cfg, val->dreg);
4038 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4039 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4040 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4041 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4044 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call the runtime-resolved Box helper through a
 * hand-built gsharedvt signature. */
4047 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4048 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4050 MonoMethodSignature *box_sig;
4053 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4054 * construct that method at JIT time, so have to do things by hand.
4056 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4057 box_sig->ret = &mono_defaults.object_class->byval_arg;
4058 box_sig->param_count = 1;
4059 box_sig->params [0] = &klass->byval_arg;
4060 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4061 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4062 res->type = STACK_OBJ;
4066 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4068 MONO_START_BB (cfg, end_bb);
4070 *out_cbb = cfg->cbb;
/* Plain valuetype path (non-gsharedvt). */
4074 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4078 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Returns whether @klass has a covariant/contravariant generic parameter
 * instantiated with a reference type — such casts need the cache-based
 * cast helpers rather than a direct class comparison. */
4085 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4088 MonoGenericContainer *container;
4089 MonoGenericInst *ginst;
4091 if (klass->generic_class) {
4092 container = klass->generic_class->container_class->generic_container;
4093 ginst = klass->generic_class->context.class_inst;
4094 } else if (klass->generic_container && context_used) {
4095 container = klass->generic_container;
4096 ginst = container->context.class_inst;
/* Check each variant parameter for a reference-type argument. */
4101 for (i = 0; i < container->type_argc; ++i) {
4103 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4105 type = ginst->type_argv [i];
4106 if (mini_type_is_reference (cfg, type))
/* A cast against these classes (interfaces, arrays, nullables, MBR, sealed,
 * or open type variables) cannot be done with a simple class-pointer compare. */
4112 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/* Emit a call to the castclass-with-cache marshal wrapper, bracketed by
 * cast-details bookkeeping.  @args are (obj, klass, cache); the updated
 * current bblock is returned through *out_bblock. */
4115 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4117 MonoMethod *mono_castclass;
4120 mono_castclass = mono_marshal_get_castclass_with_cache ();
4122 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4123 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4124 reset_cast_details (cfg);
4125 *out_bblock = cfg->cbb;
/* Non-shared variant: builds the (obj, klass, cache) argument triple itself.
 * Under AOT the cache slot is a per-call-site CASTCLASS_CACHE patch; under the
 * JIT it is a freshly allocated pointer-sized domain slot. */
4131 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4140 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4143 if (cfg->compile_aot) {
4144 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4145 cfg->castclass_cache_index ++;
4146 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4147 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4149 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4152 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4154 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4158 * Returns NULL and set the cfg exception on error.
/* Emit IR for the CIL 'castclass' opcode: null passes through, otherwise the
 * object must be an instance of @klass or InvalidCastException is thrown.
 * Fast paths: cache-based wrapper for variant-generic targets, inlined
 * marshal wrapper for MBR/interface targets, then the generic inline check. */
4161 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4163 MonoBasicBlock *is_null_bb;
4164 int obj_reg = src->dreg;
4165 int vtable_reg = alloc_preg (cfg);
4167 MonoInst *klass_inst = NULL, *res;
4168 MonoBasicBlock *bblock;
4172 context_used = mini_class_check_context_used (cfg, klass);
4174 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4175 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4176 (*inline_costs) += 2;
4179 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4180 MonoMethod *mono_castclass;
4181 MonoInst *iargs [1];
4184 mono_castclass = mono_marshal_get_castclass (klass);
/* Inline the castclass marshal wrapper at this call site. */
4187 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4188 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4189 iargs, ip, cfg->real_offset, TRUE, &bblock);
4190 reset_cast_details (cfg);
4191 CHECK_CFG_EXCEPTION;
4192 g_assert (costs > 0);
4194 cfg->real_offset += 5;
4196 (*inline_costs) += costs;
/* Shared-code (context_used) paths below. */
4205 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4206 MonoInst *cache_ins;
4208 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4213 /* klass - it's the second element of the cache entry*/
4214 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4217 args [2] = cache_ins;
4219 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4222 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check: skip everything for a null object. */
4225 NEW_BBLOCK (cfg, is_null_bb);
4227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4228 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4230 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4232 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4233 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4234 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4236 int klass_reg = alloc_preg (cfg);
4238 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes only need an exact vtable/class comparison. */
4240 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4241 /* the remoting code is broken, access the class for now */
4242 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4243 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4245 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4246 cfg->exception_ptr = klass;
4249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4252 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4254 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy via the castclass helper. */
4256 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4257 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4261 MONO_START_BB (cfg, is_null_bb);
4263 reset_cast_details (cfg);
4274 * Returns NULL and set the cfg exception on error.
/* Emit IR for the CIL 'isinst' opcode: yields the object if it is an instance
 * of @klass, NULL otherwise.  Complex targets go through the cache-based
 * isinst wrapper; simpler ones get an inline check with three join blocks:
 * is_null_bb (success: result already holds the object), false_bb (result 0),
 * end_bb. */
4277 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4280 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4281 int obj_reg = src->dreg;
4282 int vtable_reg = alloc_preg (cfg);
4283 int res_reg = alloc_ireg_ref (cfg);
4284 MonoInst *klass_inst = NULL;
4289 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4290 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4291 MonoInst *cache_ins;
4293 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4298 /* klass - it's the second element of the cache entry*/
4299 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4302 args [2] = cache_ins;
4304 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4307 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4310 NEW_BBLOCK (cfg, is_null_bb);
4311 NEW_BBLOCK (cfg, false_bb);
4312 NEW_BBLOCK (cfg, end_bb);
4314 /* Do the assignment at the beginning, so the other assignment can be if converted */
4315 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4316 ins->type = STACK_OBJ;
/* null is trivially "not an instance" but the result is the null itself. */
4319 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4320 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4322 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4324 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4325 g_assert (!context_used);
4326 /* the is_null_bb target simply copies the input register to the output */
4327 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4329 int klass_reg = alloc_preg (cfg);
/* Array target: match rank first, then check the element (cast) class. */
4332 int rank_reg = alloc_preg (cfg);
4333 int eclass_reg = alloc_preg (cfg);
4335 g_assert (!context_used);
4336 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4337 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4338 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4339 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4340 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes with non-obvious assignability rules
 * (object, Enum's parent, Enum, interface element types). */
4341 if (klass->cast_class == mono_defaults.object_class) {
4342 int parent_reg = alloc_preg (cfg);
4343 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4344 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4345 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4346 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4347 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4348 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4349 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4351 } else if (klass->cast_class == mono_defaults.enum_class) {
4352 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4354 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4355 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4357 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4358 /* Check that the object is a vector too */
4359 int bounds_reg = alloc_preg (cfg);
4360 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4361 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4362 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4365 /* the is_null_bb target simply copies the input register to the output */
4366 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4368 } else if (mono_class_is_nullable (klass)) {
4369 g_assert (!context_used);
4370 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4371 /* the is_null_bb target simply copies the input register to the output */
4372 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: an exact vtable/class comparison suffices. */
4374 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4375 g_assert (!context_used);
4376 /* the remoting code is broken, access the class for now */
4377 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4378 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4380 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4381 cfg->exception_ptr = klass;
4384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4386 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4387 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4389 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4390 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General class: hierarchy walk via the isinst helper. */
4392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4393 /* the is_null_bb target simply copies the input register to the output */
4394 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Join: false_bb yields NULL, is_null_bb keeps the (already copied) object. */
4399 MONO_START_BB (cfg, false_bb);
4401 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4402 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4404 MONO_START_BB (cfg, is_null_bb);
4406 MONO_START_BB (cfg, end_bb);
/* Emit IR for Mono's internal CEE_MONO_CISINST opcode (remoting-aware isinst). */
4412 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4414 /* This opcode takes as input an object reference and a class, and returns:
4415 0) if the object is an instance of the class,
4416 1) if the object is not instance of the class,
4417 2) if the object is a proxy whose type cannot be determined */
4420 #ifndef DISABLE_REMOTING
4421 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4423 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4425 int obj_reg = src->dreg;
4426 int dreg = alloc_ireg (cfg);
4428 #ifndef DISABLE_REMOTING
4429 int klass_reg = alloc_preg (cfg);
4432 NEW_BBLOCK (cfg, true_bb);
4433 NEW_BBLOCK (cfg, false_bb);
4434 NEW_BBLOCK (cfg, end_bb);
4435 #ifndef DISABLE_REMOTING
4436 NEW_BBLOCK (cfg, false2_bb);
4437 NEW_BBLOCK (cfg, no_proxy_bb);
/* null => "not an instance" (result 1). */
4440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4441 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4443 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4444 #ifndef DISABLE_REMOTING
4445 NEW_BBLOCK (cfg, interface_fail_bb);
4448 tmp_reg = alloc_preg (cfg);
4449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4450 #ifndef DISABLE_REMOTING
/* Interface miss may still be a transparent proxy; re-check in that case. */
4451 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4452 MONO_START_BB (cfg, interface_fail_bb);
4453 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4455 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* A proxy without custom type info cannot be resolved => result 2. */
4457 tmp_reg = alloc_preg (cfg);
4458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4460 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4462 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4465 #ifndef DISABLE_REMOTING
4466 tmp_reg = alloc_preg (cfg);
4467 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4470 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class's proxy_class instead. */
4471 tmp_reg = alloc_preg (cfg);
4472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4473 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4475 tmp_reg = alloc_preg (cfg);
4476 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4478 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4480 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4481 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4483 MONO_START_BB (cfg, no_proxy_bb);
4485 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4487 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: false => 1, false2 (undecidable proxy) => 2, true => 0. */
4491 MONO_START_BB (cfg, false_bb);
4493 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4494 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4496 #ifndef DISABLE_REMOTING
4497 MONO_START_BB (cfg, false2_bb);
4499 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4500 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4503 MONO_START_BB (cfg, true_bb);
4505 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4507 MONO_START_BB (cfg, end_bb);
/* Materialize the I4 result. */
4510 MONO_INST_NEW (cfg, ins, OP_ICONST);
4512 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 * Emit IR for the CEE_MONO_CCASTCLASS opcode (cast-with-proxy-awareness).
 * Builds a small CFG of basic blocks and returns an OP_ICONST result
 * (see the result encoding in the comment below). The remoting-aware
 * path is compiled out when DISABLE_REMOTING is defined, in which case
 * hitting a proxy aborts via g_error().
 */
4518 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4520 /* This opcode takes as input an object reference and a class, and returns:
4521 0) if the object is an instance of the class,
4522 1) if the object is a proxy whose type cannot be determined
4523 an InvalidCastException exception is thrown otherwise*/
4526 #ifndef DISABLE_REMOTING
4527 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4529 MonoBasicBlock *ok_result_bb;
4531 int obj_reg = src->dreg;
4532 int dreg = alloc_ireg (cfg);
4533 int tmp_reg = alloc_preg (cfg);
4535 #ifndef DISABLE_REMOTING
4536 int klass_reg = alloc_preg (cfg);
4537 NEW_BBLOCK (cfg, end_bb);
4540 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference trivially satisfies the cast: jump straight to the OK result. */
4542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4543 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can produce a descriptive exception message. */
4545 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4547 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4548 #ifndef DISABLE_REMOTING
4549 NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface case: try the interface cast first; on failure fall through to the proxy checks. */
4551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4552 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4553 MONO_START_BB (cfg, interface_fail_bb);
4554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Only a transparent proxy may legitimately fail the iface check; anything else throws. */
4556 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4558 tmp_reg = alloc_preg (cfg);
/* A proxy without custom type info cannot be verified -> InvalidCastException. */
4559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4561 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Result 1: proxy whose type could not be determined statically. */
4563 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4564 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Remoting disabled: a plain interface cast with no fallback blocks. */
4566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4567 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4571 #ifndef DISABLE_REMOTING
4572 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: detect transparent proxies before doing a regular castclass. */
4574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4576 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* It is a proxy: check against the remote class's proxy_class instead. */
4578 tmp_reg = alloc_preg (cfg);
4579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4582 tmp_reg = alloc_preg (cfg);
4583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4585 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4587 NEW_BBLOCK (cfg, fail_1_bb);
4589 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4591 MONO_START_BB (cfg, fail_1_bb);
/* Result 1 again: type cannot be decided from the proxy's metadata alone. */
4593 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4596 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: emit the standard castclass check (throws on failure). */
4598 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4600 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4604 MONO_START_BB (cfg, ok_result_bb);
/* Result 0: the object is an instance of the class. */
4606 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4608 #ifndef DISABLE_REMOTING
4609 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 on the evaluation stack. */
4613 MONO_INST_NEW (cfg, ins, OP_ICONST);
4615 ins->type = STACK_I4;
4621 * Returns NULL and set the cfg exception on error.
4623 static G_GNUC_UNUSED MonoInst*
/*
 * handle_delegate_ctor:
 *
 * Inline the work normally done by mono_delegate_ctor (): allocate the
 * delegate object, fill in its target/method/method_code/invoke_impl
 * fields, and return the new object instruction. VIRTUAL selects the
 * virtual-invoke trampoline; CONTEXT_USED != 0 means METHOD must be
 * looked up through the RGCTX rather than embedded directly.
 */
4624 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4628 gpointer trampoline;
4629 MonoInst *obj, *method_ins, *tramp_ins;
4633 // FIXME reenable optimisation for virtual case
4638 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4641 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
4645 obj = handle_alloc (cfg, klass, FALSE, 0);
4649 /* Inline the contents of mono_delegate_ctor */
4651 /* Set target field */
4652 /* Optimize away setting of NULL target */
4653 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4654 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target field holds a GC reference; notify the collector when barriers are enabled. */
4655 if (cfg->gen_write_barriers) {
4656 dreg = alloc_preg (cfg);
4657 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4658 emit_write_barrier (cfg, ptr, target);
4662 /* Set method field */
4663 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4664 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4667 * To avoid looking up the compiled code belonging to the target method
4668 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4669 * store it, and we fill it after the method has been compiled.
4671 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4672 MonoInst *code_slot_ins;
4675 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create and cache the per-method code slot under the domain lock. */
4677 domain = mono_domain_get ();
4678 mono_domain_lock (domain);
4679 if (!domain_jit_info (domain)->method_code_hash)
4680 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4681 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4683 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4684 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4686 mono_domain_unlock (domain);
/* Under AOT the slot address must be a patchable constant, not a raw pointer. */
4688 if (cfg->compile_aot)
4689 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4691 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the delegate trampoline: an AOT patch, a virtual-invoke trampoline, or trampoline info. */
4696 if (cfg->compile_aot) {
4697 MonoDelegateClassMethodPair *del_tramp;
4699 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4700 del_tramp->klass = klass;
4701 del_tramp->method = context_used ? NULL : method;
4702 del_tramp->virtual = virtual;
4703 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4706 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4708 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4709 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4712 /* Set invoke_impl field */
4714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual path: copy invoke_impl and method_ptr out of the trampoline info struct. */
4716 dreg = alloc_preg (cfg);
4717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4718 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4720 dreg = alloc_preg (cfg);
4721 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4722 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4725 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 * Emit a call to the rank-specific mono_array_new_va icall to allocate a
 * multi-dimensional array. RANK selects the icall variant; SP holds the
 * dimension arguments; IP is the current IL position.
 */
4731 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4733 MonoJitICallInfo *info;
4735 /* Need to register the icall so it gets an icall wrapper */
4736 info = mono_get_array_new_va_icall (rank);
4738 cfg->flags |= MONO_CFG_HAS_VARARGS;
4740 /* mono_array_new_va () needs a vararg calling convention */
4741 cfg->disable_llvm = TRUE;
4743 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4744 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 * Emit an OP_LOAD_GOTADDR at the start of the entry basic block to
 * initialize cfg->got_var, and a dummy use in the exit block to keep the
 * variable alive. No-op if there is no got_var or it was already allocated.
 */
4748 mono_emit_load_got_addr (MonoCompile *cfg)
4750 MonoInst *getaddr, *dummy_use;
4752 if (!cfg->got_var || cfg->got_var_allocated)
4755 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4756 getaddr->cil_code = cfg->header->code;
4757 getaddr->dreg = cfg->got_var->dreg;
4759 /* Add it to the start of the first bblock */
4760 if (cfg->bb_entry->code) {
/* Prepend by splicing in front of the existing instruction list. */
4761 getaddr->next = cfg->bb_entry->code;
4762 cfg->bb_entry->code = getaddr;
4765 MONO_ADD_INS (cfg->bb_entry, getaddr);
4767 cfg->got_var_allocated = TRUE;
4770 * Add a dummy use to keep the got_var alive, since real uses might
4771 * only be generated by the back ends.
4772 * Add it to end_bblock, so the variable's lifetime covers the whole
4774 * It would be better to make the usage of the got var explicit in all
4775 * cases when the backend needs it (i.e. calls, throw etc.), so this
4776 * wouldn't be needed.
4778 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4779 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inlining size threshold, initialized lazily from MONO_INLINELIMIT
 * (falling back to INLINE_LENGTH_LIMIT) in mono_method_check_inlining (). */
4782 static int inline_limit;
4783 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 * Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Checks compile options, inline depth, method flags,
 * body size against the MONO_INLINELIMIT/INLINE_LENGTH_LIMIT threshold,
 * class-initialization constraints, declarative security, and (on
 * soft-float targets) R4 usage in the signature.
 */
4786 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4788 MonoMethodHeaderSummary header;
4790 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4791 MonoMethodSignature *sig = mono_method_signature (method);
4795 if (cfg->disable_inline)
4797 if (cfg->generic_sharing_context)
4800 if (cfg->inline_depth > 10)
4803 #ifdef MONO_ARCH_HAVE_LMF_OPS
4804 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4805 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4806 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4811 if (!mono_method_get_header_summary (method, &header))
4814 /*runtime, icall and pinvoke are checked by summary call*/
4815 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4816 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4817 (mono_class_is_marshalbyref (method->klass)) ||
4821 /* also consider num_locals? */
4822 /* Do the size check early to avoid creating vtables */
4823 if (!inline_limit_inited) {
4824 if (g_getenv ("MONO_INLINELIMIT"))
4825 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4827 inline_limit = INLINE_LENGTH_LIMIT;
4828 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the code-size limit. */
4830 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4834 * if we can initialize the class of the method right away, we do,
4835 * otherwise we don't allow inlining if the class needs initialization,
4836 * since it would mean inserting a call to mono_runtime_class_init()
4837 * inside the inlined code
4839 if (!(cfg->opt & MONO_OPT_SHARED)) {
4840 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4841 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4842 vtable = mono_class_vtable (cfg->domain, method->klass);
4845 if (!cfg->compile_aot)
4846 mono_runtime_class_init (vtable);
4847 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4848 if (cfg->run_cctors && method->klass->has_cctor) {
4849 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4850 if (!method->klass->runtime_info)
4851 /* No vtable created yet */
4853 vtable = mono_class_vtable (cfg->domain, method->klass);
4856 /* This makes so that inline cannot trigger */
4857 /* .cctors: too many apps depend on them */
4858 /* running with a specific order... */
4859 if (! vtable->initialized)
4861 mono_runtime_class_init (vtable);
4863 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4864 if (!method->klass->runtime_info)
4865 /* No vtable created yet */
4867 vtable = mono_class_vtable (cfg->domain, method->klass);
4870 if (!vtable->initialized)
4875 * If we're compiling for shared code
4876 * the cctor will need to be run at aot method load time, for example,
4877 * or at the end of the compilation of the inlining method.
4879 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4884 * CAS - do not inline methods with declarative security
4885 * Note: this has to be before any possible return TRUE;
4887 if (mono_security_method_has_declsec (method))
4890 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets cannot inline methods that take or return R4 values. */
4891 if (mono_arch_is_soft_float ()) {
4893 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4895 for (i = 0; i < sig->param_count; ++i)
4896 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
4901 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 * Decide whether a static-field access in METHOD requires emitting a call
 * to run KLASS's class constructor first. JIT compilation can consult the
 * vtable's initialized flag directly; AOT cannot, so the checks differ.
 */
4908 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4910 if (!cfg->compile_aot) {
4912 if (vtable->initialized)
4916 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4917 if (cfg->method == method)
4921 if (!mono_class_needs_cctor_run (klass, method))
4924 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4925 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements are of type KLASS. Emits a bounds check when
 * BCHECK is set. Returns a STACK_MP instruction holding the address.
 */
4932 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4936 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* For gsharedvt variable-size element types, the element size must come from the RGCTX below. */
4939 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4942 mono_class_init (klass);
4943 size = mono_class_array_element_size (klass);
4946 mult_reg = alloc_preg (cfg);
4947 array_reg = arr->dreg;
4948 index_reg = index->dreg;
4950 #if SIZEOF_REGISTER == 8
4951 /* The array reg is 64 bits but the index reg is only 32 */
4952 if (COMPILE_LLVM (cfg)) {
4954 index2_reg = index_reg;
4956 index2_reg = alloc_preg (cfg);
4957 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4960 if (index->type == STACK_I8) {
4961 index2_reg = alloc_preg (cfg);
4962 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4964 index2_reg = index_reg;
4969 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4971 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: power-of-two element sizes map directly onto an x86 LEA addressing mode. */
4972 if (size == 1 || size == 2 || size == 4 || size == 8) {
4973 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4975 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4976 ins->klass = mono_class_get_element_class (klass);
4977 ins->type = STACK_MP;
4983 add_reg = alloc_ireg_mp (cfg);
4986 MonoInst *rgctx_ins;
/* gsharedvt: load the element size from the runtime generic context at execution time. */
4989 g_assert (cfg->generic_sharing_context);
4990 context_used = mini_class_check_context_used (cfg, klass);
4991 g_assert (context_used);
4992 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4993 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4995 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = array + index * size + offsetof (MonoArray, vector) */
4997 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4998 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4999 ins->klass = mono_class_get_element_class (klass);
5000 ins->type = STACK_MP;
5001 MONO_ADD_INS (cfg->cbb, ins);
5006 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 * Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * rank-2 array, including lower-bound adjustment and per-dimension range
 * checks against the MonoArrayBounds entries. Only compiled in when the
 * architecture has a native multiply (depends on OP_LMUL).
 */
5008 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5010 int bounds_reg = alloc_preg (cfg);
5011 int add_reg = alloc_ireg_mp (cfg);
5012 int mult_reg = alloc_preg (cfg);
5013 int mult2_reg = alloc_preg (cfg);
5014 int low1_reg = alloc_preg (cfg);
5015 int low2_reg = alloc_preg (cfg);
5016 int high1_reg = alloc_preg (cfg);
5017 int high2_reg = alloc_preg (cfg);
5018 int realidx1_reg = alloc_preg (cfg);
5019 int realidx2_reg = alloc_preg (cfg);
5020 int sum_reg = alloc_preg (cfg);
5021 int index1, index2, tmpreg;
5025 mono_class_init (klass);
5026 size = mono_class_array_element_size (klass);
5028 index1 = index_ins1->dreg;
5029 index2 = index_ins2->dreg;
5031 #if SIZEOF_REGISTER == 8
5032 /* The array reg is 64 bits but the index reg is only 32 */
5033 if (COMPILE_LLVM (cfg)) {
5036 tmpreg = alloc_preg (cfg);
5037 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5039 tmpreg = alloc_preg (cfg);
5040 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5044 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5048 /* range checking */
5049 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5050 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; unsigned compare folds the < 0 and >= length checks. */
5052 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5053 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5054 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5055 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5056 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5057 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5058 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: second MonoArrayBounds entry sits sizeof (MonoArrayBounds) past the first. */
5060 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5061 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5062 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5063 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5064 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5065 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5066 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = array + ((realidx1 * length2 + realidx2) * size) + offsetof (MonoArray, vector) */
5068 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5069 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5070 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5071 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5072 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5074 ins->type = STACK_MP;
5076 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 * Dispatch element-address computation for an Address/Get/Set array
 * accessor CMETHOD: rank 1 and (when intrinsics are enabled) rank 2 get
 * inline IR; higher ranks call a marshalling helper. IS_SET excludes the
 * trailing value argument when counting the rank.
 */
5083 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5087 MonoMethod *addr_method;
5090 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5093 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
5095 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5096 /* emit_ldelema_2 depends on OP_LMUL */
5097 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
5098 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated array-address helper for this rank/element size. */
5102 element_size = mono_class_array_element_size (cmethod->klass->element_class);
5103 addr_method = mono_marshal_get_array_address (rank, element_size);
5104 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy ()). */
5109 static MonoBreakPolicy
5110 always_insert_breakpoint (MonoMethod *method)
5112 return MONO_BREAK_POLICY_ALWAYS;
/* Active break-policy callback; replaced by embedders via mono_set_break_policy (). */
5115 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5118 * mono_set_break_policy:
5119 * policy_callback: the new callback function
5121 * Allow embedders to decide whether to actually obey breakpoint instructions
5122 * (both break IL instructions and Debugger.Break () method calls), for example
5123 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5124 * untrusted or semi-trusted code.
5126 * @policy_callback will be called every time a break point instruction needs to
5127 * be inserted with the method argument being the method that calls Debugger.Break()
5128 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5129 * if it wants the breakpoint to not be effective in the given method.
5130 * #MONO_BREAK_POLICY_ALWAYS is the default.
5133 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5135 if (policy_callback)
5136 break_policy_func = policy_callback;
/* Passing NULL restores the default always-break policy. */
5138 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 * Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted.
 * NOTE(review): the name misspells "breakpoint"; callers elsewhere use
 * this spelling, so renaming would need a coordinated change.
 */
5142 should_insert_brekpoint (MonoMethod *method) {
5143 switch (break_policy_func (method)) {
5144 case MONO_BREAK_POLICY_ALWAYS:
5146 case MONO_BREAK_POLICY_NEVER:
5148 case MONO_BREAK_POLICY_ON_DBG:
5149 g_warning ("mdb no longer supported");
5152 g_warning ("Incorrect value returned from break policy callback");
5157 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 * Inline the Get/SetGenericValueImpl array icalls as a direct element
 * load/store through the element address. IS_SET selects the store
 * direction; a write barrier is emitted for reference elements.
 */
5159 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5161 MonoInst *addr, *store, *load;
5162 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5164 /* the bounds check is already done by the callers */
5165 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: read the value from the caller's location, store into the array slot. */
5167 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5168 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5169 if (mini_type_is_reference (cfg, fsig->params [2]))
5170 emit_write_barrier (cfg, addr, load);
/* Get: read the element, store into the caller-supplied location. */
5172 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5173 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type in the current (possibly generic-shared) context. */
5180 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5182 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 * Emit IR for storing SP[2] into array SP[0] at index SP[1].
 * Reference-element stores with SAFETY_CHECKS go through the virtual
 * stelemref helper (which performs the covariance check); value types and
 * null stores are lowered to direct memory stores, with a constant-index
 * fast path and a gsharedvt path for variable-size elements.
 */
5186 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5188 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5189 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5190 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5191 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5192 MonoInst *iargs [3];
/* The helper is dispatched virtually through object[]'s vtable; make sure the slot exists. */
5195 mono_class_setup_vtable (obj_array);
5196 g_assert (helper->slot);
5198 if (sp [0]->type != STACK_OBJ)
5200 if (sp [2]->type != STACK_OBJ)
5207 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5211 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5214 // FIXME-VT: OP_ICONST optimization
5215 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5216 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5217 ins->opcode = OP_STOREV_MEMBASE;
5218 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset at compile time; only the bounds check remains. */
5219 int array_reg = sp [0]->dreg;
5220 int index_reg = sp [1]->dreg;
5221 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5224 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5225 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5227 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5228 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5229 if (generic_class_is_reference_type (cfg, klass))
5230 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 * Inline Array.UnsafeStore/UnsafeLoad: an element store (no covariance
 * check) or an element load with no bounds check. The element class comes
 * from the value parameter (store) or the return type (load).
 */
5237 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5242 eklass = mono_class_from_mono_type (fsig->params [2]);
5244 eklass = mono_class_from_mono_type (fsig->ret);
5247 return emit_array_store (cfg, eklass, args, FALSE);
5249 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5250 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5256 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5260 //Only allow for valuetypes
5261 if (!param_klass->valuetype || !return_klass->valuetype)
5265 if (param_klass->has_references || return_klass->has_references)
5268 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5269 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5270 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5273 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5274 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5277 //And have the same size
5278 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 * Inline Array.UnsafeMov when the parameter and return types (or their
 * element types, for rank-1 arrays) are bit-compatible value types, so
 * the argument can be passed through unchanged.
 */
5284 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5286 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5287 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5289 //Valuetypes that are semantically equivalent
5290 if (is_unsafe_mov_compatible (param_klass, return_klass))
5293 //Arrays of valuetypes that are semantically equivalent
5294 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 * Try to replace a constructor call with intrinsic IR: SIMD intrinsics
 * first (when MONO_OPT_SIMD is enabled and the arch supports them), then
 * native-types intrinsics. Falls through to the generic path otherwise.
 */
5301 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5303 #ifdef MONO_ARCH_SIMD_INTRINSICS
5304 MonoInst *ins = NULL;
5306 if (cfg->opt & MONO_OPT_SIMD) {
5307 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5313 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 * Emit an OP_MEMORY_BARRIER of the given KIND into the current basic block.
 */
5317 emit_memory_barrier (MonoCompile *cfg, int kind)
5319 MonoInst *ins = NULL;
5320 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5321 MONO_ADD_INS (cfg->cbb, ins);
5322 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 * Intrinsics used only by the LLVM backend: Math.Sin/Cos/Sqrt/Abs(double)
 * map to single FP opcodes, and Math.Min/Max map to CMOV-based min/max
 * opcodes when MONO_OPT_CMOV is enabled. Returns the emitted instruction
 * or NULL if the method is not handled here.
 */
5328 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5330 MonoInst *ins = NULL;
5333 /* The LLVM backend supports these intrinsics */
5334 if (cmethod->klass == mono_defaults.math_class) {
5335 if (strcmp (cmethod->name, "Sin") == 0) {
5337 } else if (strcmp (cmethod->name, "Cos") == 0) {
5339 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5341 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary FP intrinsic: one freg result, one sreg operand. */
5346 MONO_INST_NEW (cfg, ins, opcode);
5347 ins->type = STACK_R8;
5348 ins->dreg = mono_alloc_freg (cfg);
5349 ins->sreg1 = args [0]->dreg;
5350 MONO_ADD_INS (cfg->cbb, ins);
5354 if (cfg->opt & MONO_OPT_CMOV) {
5355 if (strcmp (cmethod->name, "Min") == 0) {
5356 if (fsig->params [0]->type == MONO_TYPE_I4)
/* NOTE(review): the U4 check below is a plain `if` while the I8/U8 checks
 * use `else if` — looks inconsistent; confirm against upstream before
 * changing, since the elided I4 assignment line is not visible here. */
5358 if (fsig->params [0]->type == MONO_TYPE_U4)
5359 opcode = OP_IMIN_UN;
5360 else if (fsig->params [0]->type == MONO_TYPE_I8)
5362 else if (fsig->params [0]->type == MONO_TYPE_U8)
5363 opcode = OP_LMIN_UN;
5364 } else if (strcmp (cmethod->name, "Max") == 0) {
5365 if (fsig->params [0]->type == MONO_TYPE_I4)
5367 if (fsig->params [0]->type == MONO_TYPE_U4)
5368 opcode = OP_IMAX_UN;
5369 else if (fsig->params [0]->type == MONO_TYPE_I8)
5371 else if (fsig->params [0]->type == MONO_TYPE_U8)
5372 opcode = OP_LMAX_UN;
/* Binary min/max: result width follows the operand type (I4 vs I8 stack slot). */
5377 MONO_INST_NEW (cfg, ins, opcode);
5378 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5379 ins->dreg = mono_alloc_ireg (cfg);
5380 ins->sreg1 = args [0]->dreg;
5381 ins->sreg2 = args [1]->dreg;
5382 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 * Intrinsics that are valid even under generic sharing: the
 * Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers. Returns NULL when the
 * method is not one of these.
 */
5390 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5392 if (cmethod->klass == mono_defaults.array_class) {
5393 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5394 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5395 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5396 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5397 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5398 return emit_array_unsafe_mov (cfg, fsig, args);
5405 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5407 MonoInst *ins = NULL;
5409 static MonoClass *runtime_helpers_class = NULL;
5410 if (! runtime_helpers_class)
5411 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5412 "System.Runtime.CompilerServices", "RuntimeHelpers");
5414 if (cmethod->klass == mono_defaults.string_class) {
5415 if (strcmp (cmethod->name, "get_Chars") == 0) {
5416 int dreg = alloc_ireg (cfg);
5417 int index_reg = alloc_preg (cfg);
5418 int mult_reg = alloc_preg (cfg);
5419 int add_reg = alloc_preg (cfg);
5421 #if SIZEOF_REGISTER == 8
5422 /* The array reg is 64 bits but the index reg is only 32 */
5423 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5425 index_reg = args [1]->dreg;
5427 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5429 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5430 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5431 add_reg = ins->dreg;
5432 /* Avoid a warning */
5434 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5437 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5438 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5439 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5440 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5442 type_from_op (ins, NULL, NULL);
5444 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5445 int dreg = alloc_ireg (cfg);
5446 /* Decompose later to allow more optimizations */
5447 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5448 ins->type = STACK_I4;
5449 ins->flags |= MONO_INST_FAULT;
5450 cfg->cbb->has_array_access = TRUE;
5451 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5454 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5455 int mult_reg = alloc_preg (cfg);
5456 int add_reg = alloc_preg (cfg);
5458 /* The corlib functions check for oob already. */
5459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5460 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5461 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5462 return cfg->cbb->last_ins;
5465 } else if (cmethod->klass == mono_defaults.object_class) {
5467 if (strcmp (cmethod->name, "GetType") == 0) {
5468 int dreg = alloc_ireg_ref (cfg);
5469 int vt_reg = alloc_preg (cfg);
5470 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5471 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5472 type_from_op (ins, NULL, NULL);
5475 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5476 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5477 int dreg = alloc_ireg (cfg);
5478 int t1 = alloc_ireg (cfg);
5480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5481 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5482 ins->type = STACK_I4;
5486 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5487 MONO_INST_NEW (cfg, ins, OP_NOP);
5488 MONO_ADD_INS (cfg->cbb, ins);
5492 } else if (cmethod->klass == mono_defaults.array_class) {
5493 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5494 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5496 #ifndef MONO_BIG_ARRAYS
5498 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5501 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5502 int dreg = alloc_ireg (cfg);
5503 int bounds_reg = alloc_ireg_mp (cfg);
5504 MonoBasicBlock *end_bb, *szarray_bb;
5505 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5507 NEW_BBLOCK (cfg, end_bb);
5508 NEW_BBLOCK (cfg, szarray_bb);
5510 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5511 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5514 /* Non-szarray case */
5516 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5517 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5519 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5520 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5522 MONO_START_BB (cfg, szarray_bb);
5525 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5526 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5528 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5529 MONO_START_BB (cfg, end_bb);
5531 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5532 ins->type = STACK_I4;
5538 if (cmethod->name [0] != 'g')
5541 if (strcmp (cmethod->name, "get_Rank") == 0) {
5542 int dreg = alloc_ireg (cfg);
5543 int vtable_reg = alloc_preg (cfg);
5544 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5545 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5546 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5547 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5548 type_from_op (ins, NULL, NULL);
5551 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5552 int dreg = alloc_ireg (cfg);
5554 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5555 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5556 type_from_op (ins, NULL, NULL);
5561 } else if (cmethod->klass == runtime_helpers_class) {
5563 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5564 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5568 } else if (cmethod->klass == mono_defaults.thread_class) {
5569 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5570 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5571 MONO_ADD_INS (cfg->cbb, ins);
5573 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5574 return emit_memory_barrier (cfg, FullBarrier);
5576 } else if (cmethod->klass == mono_defaults.monitor_class) {
5577 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5578 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5581 if (COMPILE_LLVM (cfg)) {
5583 * Pass the argument normally, the LLVM backend will handle the
5584 * calling convention problems.
5586 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5588 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5589 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5590 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5591 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5594 return (MonoInst*)call;
5595 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
5596 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5599 if (COMPILE_LLVM (cfg)) {
5601 * Pass the argument normally, the LLVM backend will handle the
5602 * calling convention problems.
5604 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5606 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5607 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5608 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5609 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5612 return (MonoInst*)call;
5614 } else if (strcmp (cmethod->name, "Exit") == 0) {
5617 if (COMPILE_LLVM (cfg)) {
5618 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5620 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5621 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5622 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5623 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5626 return (MonoInst*)call;
5629 } else if (cmethod->klass->image == mono_defaults.corlib &&
5630 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5631 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5634 #if SIZEOF_REGISTER == 8
5635 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5638 emit_memory_barrier (cfg, FullBarrier);
5640 /* 64 bit reads are already atomic */
5641 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5642 load_ins->dreg = mono_alloc_preg (cfg);
5643 load_ins->inst_basereg = args [0]->dreg;
5644 load_ins->inst_offset = 0;
5645 MONO_ADD_INS (cfg->cbb, load_ins);
5647 emit_memory_barrier (cfg, FullBarrier);
5653 if (strcmp (cmethod->name, "Increment") == 0) {
5654 MonoInst *ins_iconst;
5657 if (fsig->params [0]->type == MONO_TYPE_I4) {
5658 opcode = OP_ATOMIC_ADD_I4;
5659 cfg->has_atomic_add_i4 = TRUE;
5661 #if SIZEOF_REGISTER == 8
5662 else if (fsig->params [0]->type == MONO_TYPE_I8)
5663 opcode = OP_ATOMIC_ADD_I8;
5666 if (!mono_arch_opcode_supported (opcode))
5668 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5669 ins_iconst->inst_c0 = 1;
5670 ins_iconst->dreg = mono_alloc_ireg (cfg);
5671 MONO_ADD_INS (cfg->cbb, ins_iconst);
5673 MONO_INST_NEW (cfg, ins, opcode);
5674 ins->dreg = mono_alloc_ireg (cfg);
5675 ins->inst_basereg = args [0]->dreg;
5676 ins->inst_offset = 0;
5677 ins->sreg2 = ins_iconst->dreg;
5678 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5679 MONO_ADD_INS (cfg->cbb, ins);
5681 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5682 MonoInst *ins_iconst;
5685 if (fsig->params [0]->type == MONO_TYPE_I4) {
5686 opcode = OP_ATOMIC_ADD_I4;
5687 cfg->has_atomic_add_i4 = TRUE;
5689 #if SIZEOF_REGISTER == 8
5690 else if (fsig->params [0]->type == MONO_TYPE_I8)
5691 opcode = OP_ATOMIC_ADD_I8;
5694 if (!mono_arch_opcode_supported (opcode))
5696 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5697 ins_iconst->inst_c0 = -1;
5698 ins_iconst->dreg = mono_alloc_ireg (cfg);
5699 MONO_ADD_INS (cfg->cbb, ins_iconst);
5701 MONO_INST_NEW (cfg, ins, opcode);
5702 ins->dreg = mono_alloc_ireg (cfg);
5703 ins->inst_basereg = args [0]->dreg;
5704 ins->inst_offset = 0;
5705 ins->sreg2 = ins_iconst->dreg;
5706 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5707 MONO_ADD_INS (cfg->cbb, ins);
5709 } else if (strcmp (cmethod->name, "Add") == 0) {
5712 if (fsig->params [0]->type == MONO_TYPE_I4) {
5713 opcode = OP_ATOMIC_ADD_I4;
5714 cfg->has_atomic_add_i4 = TRUE;
5716 #if SIZEOF_REGISTER == 8
5717 else if (fsig->params [0]->type == MONO_TYPE_I8)
5718 opcode = OP_ATOMIC_ADD_I8;
5721 if (!mono_arch_opcode_supported (opcode))
5723 MONO_INST_NEW (cfg, ins, opcode);
5724 ins->dreg = mono_alloc_ireg (cfg);
5725 ins->inst_basereg = args [0]->dreg;
5726 ins->inst_offset = 0;
5727 ins->sreg2 = args [1]->dreg;
5728 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5729 MONO_ADD_INS (cfg->cbb, ins);
5733 if (strcmp (cmethod->name, "Exchange") == 0) {
5735 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5737 if (fsig->params [0]->type == MONO_TYPE_I4) {
5738 opcode = OP_ATOMIC_EXCHANGE_I4;
5739 cfg->has_atomic_exchange_i4 = TRUE;
5741 #if SIZEOF_REGISTER == 8
5742 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5743 (fsig->params [0]->type == MONO_TYPE_I))
5744 opcode = OP_ATOMIC_EXCHANGE_I8;
5746 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5747 opcode = OP_ATOMIC_EXCHANGE_I4;
5748 cfg->has_atomic_exchange_i4 = TRUE;
5754 if (!mono_arch_opcode_supported (opcode))
5757 MONO_INST_NEW (cfg, ins, opcode);
5758 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5759 ins->inst_basereg = args [0]->dreg;
5760 ins->inst_offset = 0;
5761 ins->sreg2 = args [1]->dreg;
5762 MONO_ADD_INS (cfg->cbb, ins);
5764 switch (fsig->params [0]->type) {
5766 ins->type = STACK_I4;
5770 ins->type = STACK_I8;
5772 case MONO_TYPE_OBJECT:
5773 ins->type = STACK_OBJ;
5776 g_assert_not_reached ();
5779 if (cfg->gen_write_barriers && is_ref)
5780 emit_write_barrier (cfg, args [0], args [1]);
5783 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5785 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5786 if (fsig->params [1]->type == MONO_TYPE_I4)
5788 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5789 size = sizeof (gpointer);
5790 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5793 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5795 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5796 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5797 ins->sreg1 = args [0]->dreg;
5798 ins->sreg2 = args [1]->dreg;
5799 ins->sreg3 = args [2]->dreg;
5800 ins->type = STACK_I4;
5801 MONO_ADD_INS (cfg->cbb, ins);
5802 cfg->has_atomic_cas_i4 = TRUE;
5803 } else if (size == 8) {
5804 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5806 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5807 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5808 ins->sreg1 = args [0]->dreg;
5809 ins->sreg2 = args [1]->dreg;
5810 ins->sreg3 = args [2]->dreg;
5811 ins->type = STACK_I8;
5812 MONO_ADD_INS (cfg->cbb, ins);
5814 /* g_assert_not_reached (); */
5816 if (cfg->gen_write_barriers && is_ref)
5817 emit_write_barrier (cfg, args [0], args [1]);
5820 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5821 ins = emit_memory_barrier (cfg, FullBarrier);
5825 } else if (cmethod->klass->image == mono_defaults.corlib) {
5826 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5827 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5828 if (should_insert_brekpoint (cfg->method)) {
5829 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5831 MONO_INST_NEW (cfg, ins, OP_NOP);
5832 MONO_ADD_INS (cfg->cbb, ins);
5836 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5837 && strcmp (cmethod->klass->name, "Environment") == 0) {
5839 EMIT_NEW_ICONST (cfg, ins, 1);
5841 EMIT_NEW_ICONST (cfg, ins, 0);
5845 } else if (cmethod->klass == mono_defaults.math_class) {
5847 * There is general branches code for Min/Max, but it does not work for
5849 * http://everything2.com/?node_id=1051618
5851 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5852 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5854 MonoJumpInfoToken *ji;
5857 cfg->disable_llvm = TRUE;
5859 if (args [0]->opcode == OP_GOT_ENTRY) {
5860 pi = args [0]->inst_p1;
5861 g_assert (pi->opcode == OP_PATCH_INFO);
5862 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5865 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5866 ji = args [0]->inst_p0;
5869 NULLIFY_INS (args [0]);
5872 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5873 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5874 ins->dreg = mono_alloc_ireg (cfg);
5876 ins->inst_p0 = mono_string_to_utf8 (s);
5877 MONO_ADD_INS (cfg->cbb, ins);
5882 #ifdef MONO_ARCH_SIMD_INTRINSICS
5883 if (cfg->opt & MONO_OPT_SIMD) {
5884 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5890 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5894 if (COMPILE_LLVM (cfg)) {
5895 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5900 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5904 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected well-known calls to a faster JIT-emitted equivalent.
 * Only String.InternalAllocateStr is visibly handled here: when allocation
 * profiling is disabled and the code is not compiled with MONO_OPT_SHARED,
 * the icall is rewritten into a direct call to the managed GC allocator,
 * with the string vtable as the first argument.
 * NOTE(review): the no-redirection fall-through/return path is elided from
 * this excerpt — confirm it returns NULL in the full source.
 */
5907 inline static MonoInst*
5908 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5909 		MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5911 	if (method->klass == mono_defaults.string_class) {
5912 		/* managed string allocation support */
5913 		if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5914 			MonoInst *iargs [2];
5915 			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5916 			MonoMethod *managed_alloc = NULL;
5918 			g_assert (vtable); /* Should not fail since it is System.String */
5919 #ifndef MONO_CROSS_COMPILE
			/* NOTE(review): managed_alloc may be NULL if the GC provides no
			 * managed allocator; the guard for that case is elided here — confirm. */
5920 			managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
5924 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5925 			iargs [1] = args [0];
5926 			return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Spill the call arguments in SP into freshly created OP_LOCAL variables
 * and install them as cfg->args, so an inlined callee can address them as
 * its own arguments.  When the signature has an implicit 'this'
 * (sig->hasthis), argument 0 takes its type from the evaluation stack entry
 * rather than from sig->params.
 */
5933 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5935 	MonoInst *store, *temp;
5938 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5939 		MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5942 		 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5943 		 * would be different than the MonoInst's used to represent arguments, and
5944 		 * the ldelema implementation can't deal with that.
5945 		 * Solution: When ldelema is used on an inline argument, create a var for
5946 		 * it, emit ldelema on that var, and emit the saving code below in
5947 		 * inline_method () if needed.
5949 		temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5950 		cfg->args [i] = temp;
5951 		/* This uses cfg->args [i] which is set by the preceding line */
5952 		EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5953 		store->cil_code = sp [0]->cil_code;
5958 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5959 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5961 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: restrict inlining to callees whose full name starts with
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The limit string is read once and cached in a static.
 * Returns TRUE when CALLED_METHOD passes the filter (or no limit is set).
 */
5963 check_inline_called_method_name_limit (MonoMethod *called_method)
5966 	static const char *limit = NULL;
5968 	if (limit == NULL) {
5969 		const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5971 		if (limit_string != NULL)
5972 			limit = limit_string;
5977 	if (limit [0] != '\0') {
5978 		char *called_method_name = mono_method_full_name (called_method, TRUE);
		/* prefix match: strncmp with strlen (limit) == 0 means "starts with limit" */
5980 		strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5981 		g_free (called_method_name);
5983 		//return (strncmp_result <= 0);
5984 		return (strncmp_result == 0);
5991 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: the caller-side counterpart of
 * check_inline_called_method_name_limit (), keyed off the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 * Returns TRUE when CALLER_METHOD's full name starts with the cached
 * prefix (or no limit is set).
 */
5993 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5996 	static const char *limit = NULL;
5998 	if (limit == NULL) {
5999 		const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6000 		if (limit_string != NULL) {
6001 			limit = limit_string;
6007 	if (limit [0] != '\0') {
6008 		char *caller_method_name = mono_method_full_name (caller_method, TRUE);
		/* prefix match: strncmp with strlen (limit) == 0 means "starts with limit" */
6010 		strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6011 		g_free (caller_method_name);
6013 		//return (strncmp_result <= 0);
6014 		return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes vreg DREG to the default (zero) value of type
 * RTYPE: NULL for reference/pointer types, 0 for integer types, 0.0 for
 * R4/R8 (loaded from a shared static double constant), and VZERO for value
 * types — including generic instances that are valuetypes and VAR/MVAR type
 * variables known to be valuetypes under gsharedvt.
 */
6022 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6024 	static double r8_0 = 0.0;
6028 	rtype = mini_replace_type (rtype);
6032 		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6033 	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6034 		MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6035 	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6036 		MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6037 	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6038 		MONO_INST_NEW (cfg, ins, OP_R8CONST);
6039 		ins->type = STACK_R8;
6040 		ins->inst_p0 = (void*)&r8_0;
6042 		MONO_ADD_INS (cfg->cbb, ins);
6043 	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6044 		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6045 		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6046 	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6047 		MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
		/* everything else is treated as a pointer-sized NULL */
6049 		MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder initializations
 * that keep the IR valid (every vreg has a def) without generating real
 * machine code.  Types with no dummy opcode fall back to emit_init_rvar ().
 */
6054 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6058 	rtype = mini_replace_type (rtype);
6062 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6063 	} else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6064 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6065 	} else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6066 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6067 	} else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6068 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6069 	} else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6070 		   ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6071 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6072 	} else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6073 		MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
		/* no dummy opcode for this type: emit a real initialization */
6075 		emit_init_rvar (cfg, dreg, rtype);
6079 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE.  When INIT is TRUE a real
 * zero-initialization is emitted; otherwise a dummy one (see
 * emit_dummy_init_rvar).  Under soft-float the value is first materialized
 * in a fresh vreg and then stored to the local via LOCSTORE.
 */
6081 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6083 	MonoInst *var = cfg->locals [local];
6084 	if (COMPILE_SOFT_FLOAT (cfg)) {
6086 		int reg = alloc_dreg (cfg, var->type);
6087 		emit_init_rvar (cfg, reg, type);
		/* store the just-emitted value (cfg->cbb->last_ins) into the local */
6088 		EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6091 		emit_init_rvar (cfg, var->dreg, type);
6093 		emit_dummy_init_rvar (cfg, var->dreg, type);
6100 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point.  The callee's IR is produced
 * by a recursive call to mono_method_to_ir () into fresh start/end bblocks;
 * all of cfg's per-method state (locals, args, cil offsets, cbb, generic
 * context, ...) is saved before and restored after the recursion.  On
 * success (cost < 60, or inline_always) the new bblocks are linked/merged
 * into the CFG and *OUT_CBB receives the bblock to continue emitting into;
 * on failure the new bblocks are discarded by resetting cfg->cbb.
 * Returns the inlining cost (used by the caller to account against the
 * inline budget); the failure-path return value is elided from this excerpt.
 */
6103 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6104 	       guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6106 	MonoInst *ins, *rvar = NULL;
6107 	MonoMethodHeader *cheader;
6108 	MonoBasicBlock *ebblock, *sbblock;
6110 	MonoMethod *prev_inlined_method;
6111 	MonoInst **prev_locals, **prev_args;
6112 	MonoType **prev_arg_types;
6113 	guint prev_real_offset;
6114 	GHashTable *prev_cbb_hash;
6115 	MonoBasicBlock **prev_cil_offset_to_bb;
6116 	MonoBasicBlock *prev_cbb;
6117 	unsigned char* prev_cil_start;
6118 	guint32 prev_cil_offset_to_bb_len;
6119 	MonoMethod *prev_current_method;
6120 	MonoGenericContext *prev_generic_context;
6121 	gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6123 	g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6125 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6126 	if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6129 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6130 	if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6134 	if (cfg->verbose_level > 2)
6135 		printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6137 	if (!cmethod->inline_info) {
6138 		cfg->stat_inlineable_methods++;
6139 		cmethod->inline_info = 1;
6142 	/* allocate local variables */
6143 	cheader = mono_method_get_header (cmethod);
6145 	if (cheader == NULL || mono_loader_get_last_error ()) {
6146 		MonoLoaderError *error = mono_loader_get_last_error ();
6149 		mono_metadata_free_mh (cheader);
6150 		if (inline_always && error)
6151 			mono_cfg_set_exception (cfg, error->exception_type);
6153 		mono_loader_clear_error ();
6157 	/* Must verify before creating locals as it can cause the JIT to assert. */
6158 	if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6159 		mono_metadata_free_mh (cheader);
6163 	/* allocate space to store the return value */
6164 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6165 		rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6168 	prev_locals = cfg->locals;
6169 	cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6170 	for (i = 0; i < cheader->num_locals; ++i)
6171 		cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6173 	/* allocate start and end blocks */
6174 	/* This is needed so if the inline is aborted, we can clean up */
6175 	NEW_BBLOCK (cfg, sbblock);
6176 	sbblock->real_offset = real_offset;
6178 	NEW_BBLOCK (cfg, ebblock);
6179 	ebblock->block_num = cfg->num_bblocks++;
6180 	ebblock->real_offset = real_offset;
	/* save the compilation state that mono_method_to_ir () will clobber */
6182 	prev_args = cfg->args;
6183 	prev_arg_types = cfg->arg_types;
6184 	prev_inlined_method = cfg->inlined_method;
6185 	cfg->inlined_method = cmethod;
6186 	cfg->ret_var_set = FALSE;
6187 	cfg->inline_depth ++;
6188 	prev_real_offset = cfg->real_offset;
6189 	prev_cbb_hash = cfg->cbb_hash;
6190 	prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6191 	prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6192 	prev_cil_start = cfg->cil_start;
6193 	prev_cbb = cfg->cbb;
6194 	prev_current_method = cfg->current_method;
6195 	prev_generic_context = cfg->generic_context;
6196 	prev_ret_var_set = cfg->ret_var_set;
6197 	prev_disable_inline = cfg->disable_inline;
6199 	if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
	/* recursively convert the callee's CIL into IR between sbblock and ebblock */
6202 	costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6204 	ret_var_set = cfg->ret_var_set;
	/* restore the saved compilation state */
6206 	cfg->inlined_method = prev_inlined_method;
6207 	cfg->real_offset = prev_real_offset;
6208 	cfg->cbb_hash = prev_cbb_hash;
6209 	cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6210 	cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6211 	cfg->cil_start = prev_cil_start;
6212 	cfg->locals = prev_locals;
6213 	cfg->args = prev_args;
6214 	cfg->arg_types = prev_arg_types;
6215 	cfg->current_method = prev_current_method;
6216 	cfg->generic_context = prev_generic_context;
6217 	cfg->ret_var_set = prev_ret_var_set;
6218 	cfg->disable_inline = prev_disable_inline;
6219 	cfg->inline_depth --;
	/* the cost threshold 60 gates inlining unless inline_always is set */
6221 	if ((costs >= 0 && costs < 60) || inline_always) {
6222 		if (cfg->verbose_level > 2)
6223 			printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6225 		cfg->stat_inlined_methods++;
6227 		/* always add some code to avoid block split failures */
6228 		MONO_INST_NEW (cfg, ins, OP_NOP);
6229 		MONO_ADD_INS (prev_cbb, ins);
6231 		prev_cbb->next_bb = sbblock;
6232 		link_bblock (cfg, prev_cbb, sbblock);
6235 		 * Get rid of the begin and end bblocks if possible to aid local
6238 		mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6240 		if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6241 			mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6243 		if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6244 			MonoBasicBlock *prev = ebblock->in_bb [0];
6245 			mono_merge_basic_blocks (cfg, prev, ebblock);
6247 			if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6248 				mono_merge_basic_blocks (cfg, prev_cbb, prev);
6249 				cfg->cbb = prev_cbb;
6253 		 * It's possible that the rvar is set in some prev bblock, but not in others.
		/* give rvar a dummy def on paths ending in OP_NOT_REACHED so it has a def on every path */
6259 			for (i = 0; i < ebblock->in_count; ++i) {
6260 				bb = ebblock->in_bb [i];
6262 				if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6265 					emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6273 		*out_cbb = cfg->cbb;
6277 		 * If the inlined method contains only a throw, then the ret var is not
6278 		 * set, so set it to a dummy value.
6281 			emit_init_rvar (cfg, rvar->dreg, fsig->ret);
		/* push the return value back onto the caller's evaluation stack */
6283 		EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6286 		cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6289 		if (cfg->verbose_level > 2)
6290 			printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6291 		cfg->exception_type = MONO_EXCEPTION_NONE;
6292 		mono_loader_clear_error ();
6294 		/* This gets rid of the newly added bblocks */
6295 		cfg->cbb = prev_cbb;
6297 	cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6302 * Some of these comments may well be out-of-date.
6303 * Design decisions: we do a single pass over the IL code (and we do bblock
6304 * splitting/merging in the few cases when it's required: a back jump to an IL
6305 * address that was not already seen as bblock starting point).
6306 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6307 * Complex operations are decomposed in simpler ones right away. We need to let the
6308 * arch-specific code peek and poke inside this process somehow (except when the
6309 * optimizations can take advantage of the full semantic info of coarse opcodes).
6310 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6311 * MonoInst->opcode initially is the IL opcode or some simplification of that
6312 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6313 * opcode with value bigger than OP_LAST.
6314 * At this point the IR can be handed over to an interpreter, a dumb code generator
6315 * or to the optimizing code generator that will translate it to SSA form.
6317 * Profiling directed optimizations.
6318 * We may compile by default with few or no optimizations and instrument the code
6319 * or the user may indicate what methods to optimize the most either in a config file
6320 * or through repeated runs where the compiler applies offline the optimizations to
6321 * each method and then decides if it was worth it.
6324 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6325 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6326 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6327 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6328 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6329 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6330 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6331 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6333 /* offset from br.s -> br like opcodes */
6334 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the CIL address IP belongs to basic block BB, i.e. IP
 * does not start a different basic block (cil_offset_to_bb maps it either
 * to NULL or to BB itself).
 */
6337 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6339 	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6341 	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the CIL stream [START, END) and create (via GET_BBLOCK) a basic
 * block at every branch target and at every instruction following a branch
 * or switch.  Also marks the bblock containing a CEE_THROW as out-of-line,
 * so the register allocator / layout can treat it as cold code.
 * NOTE(review): the error/return paths (*pos updates, invalid-opcode
 * handling) are elided from this excerpt.
 */
6345 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6347 	unsigned char *ip = start;
6348 	unsigned char *target;
6351 	MonoBasicBlock *bblock;
6352 	const MonoOpcode *opcode;
6355 		cli_addr = ip - start;
6356 		i = mono_opcode_value ((const guint8 **)&ip, end);
6359 		opcode = &mono_opcodes [i];
		/* advance ip by the operand size; only branch-like operands create bblocks */
6360 		switch (opcode->argument) {
6361 		case MonoInlineNone:
6364 		case MonoInlineString:
6365 		case MonoInlineType:
6366 		case MonoInlineField:
6367 		case MonoInlineMethod:
6370 		case MonoShortInlineR:
6377 		case MonoShortInlineVar:
6378 		case MonoShortInlineI:
6381 		case MonoShortInlineBrTarget:
			/* short branch: 1-byte signed displacement relative to the next instruction */
6382 			target = start + cli_addr + 2 + (signed char)ip [1];
6383 			GET_BBLOCK (cfg, bblock, target);
6386 				GET_BBLOCK (cfg, bblock, ip);
6388 		case MonoInlineBrTarget:
			/* long branch: 4-byte signed displacement relative to the next instruction */
6389 			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6390 			GET_BBLOCK (cfg, bblock, target);
6393 				GET_BBLOCK (cfg, bblock, ip);
6395 		case MonoInlineSwitch: {
6396 			guint32 n = read32 (ip + 1);
			/* switch targets are relative to the end of the n-entry jump table */
6399 			cli_addr += 5 + 4 * n;
6400 			target = start + cli_addr;
6401 			GET_BBLOCK (cfg, bblock, target);
6403 			for (j = 0; j < n; ++j) {
6404 				target = start + cli_addr + (gint32)read32 (ip);
6405 				GET_BBLOCK (cfg, bblock, target);
6415 			g_assert_not_reached ();
6418 		if (i == CEE_THROW) {
6419 			unsigned char *bb_start = ip - 1;
6421 			/* Find the start of the bblock containing the throw */
6423 			while ((bb_start >= start) && !bblock) {
6424 				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6428 			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 * methods the token indexes the wrapper's own data and the result is
 * inflated with CONTEXT when needed; otherwise the token is resolved
 * through the metadata of M's image.  "allow_open" because open constructed
 * types are not rejected here (see mini_get_method).
 */
6438 static inline MonoMethod *
6439 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6443 	if (m->wrapper_type != MONO_WRAPPER_NONE) {
6444 		method = mono_method_get_wrapper_data (m, token);
6447 			method = mono_class_inflate_generic_method_checked (method, context, &error);
6448 			g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
6451 		method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, rejects methods on open constructed types (their class type is
 * still open) — the visible code only shows the check, the rejection action
 * is elided from this excerpt.
 */
6457 static inline MonoMethod *
6458 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6460 	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6462 	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, inflating with
 * CONTEXT.  Wrapper methods resolve through their wrapper data; ordinary
 * methods go through the image's typespec tables.  The class is initialized
 * before being returned.
 */
6468 static inline MonoClass*
6469 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6474 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
6475 		klass = mono_method_get_wrapper_data (method, token);
6477 			klass = mono_class_inflate_generic_class (klass, context);
6479 		klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6480 		mono_error_cleanup (&error); /* FIXME don't swallow the error */
6483 		mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD,
 * inflating generic parameters with CONTEXT.  Wrapper methods read the
 * signature from their wrapper data; ordinary methods parse it from the
 * image metadata.
 */
6487 static inline MonoMethodSignature*
6488 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6490 	MonoMethodSignature *fsig;
6492 	if (method->wrapper_type != MONO_WRAPPER_NONE) {
6495 		fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6497 		fsig = mono_inflate_generic_signature (fsig, context, &error);
6499 		g_assert (mono_error_ok (&error));
6502 		fsig = mono_metadata_parse_signature (method->klass->image, token);
6508 * Returns TRUE if the JIT should abort inlining because "callee"
6509 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a CALLER -> CALLEE call.  On an ECMA
 * linkdemand, code throwing a SecurityException is emitted before the
 * actual call; on other failures the compile is flagged with
 * MONO_EXCEPTION_SECURITY_LINKDEMAND (without hiding an earlier exception).
 */
6512 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6516 	if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6520 	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6521 	if (result == MONO_JIT_SECURITY_OK)
6524 	if (result == MONO_JIT_LINKDEMAND_ECMA) {
6525 		/* Generate code to throw a SecurityException before the actual call/link */
6526 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
6529 		NEW_ICONST (cfg, args [0], 4);
6530 		NEW_METHODCONST (cfg, args [1], caller);
6531 		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6532 	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6533 		/* don't hide previous results */
6534 		mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6535 		cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily looking it up on first use) the
 * SecurityManager.ThrowException (1 arg) helper method used by
 * emit_throw_exception ().
 */
6543 throw_exception (void)
6545 	static MonoMethod *method = NULL;
6548 		MonoSecurityManager *secman = mono_security_manager_get_methods ();
6549 		method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () that throws the
 * pre-created exception object EX at runtime.
 */
6556 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6558 	MonoMethod *thrower = throw_exception ();
6561 	EMIT_NEW_PCONST (cfg, args [0], ex);
6562 	mono_emit_method_call (cfg, thrower, args, NULL);
6566 * Return the original method if a wrapper is specified. We can only access
6567 * the custom attributes from the original method.
6570 get_original_method (MonoMethod *method)
6572 	if (method->wrapper_type == MONO_WRAPPER_NONE)
6575 	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6576 	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6579 	/* in other cases we need to find the original method */
6580 	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * is not allowed to access FIELD, emit code that throws the returned
 * exception at runtime.
 */
6584 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6585 						MonoBasicBlock *bblock, unsigned char *ip)
6587 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6588 	MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6590 		emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * is not allowed to call CALLEE, emit code that throws the returned
 * exception at runtime.
 */
6594 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6595 						MonoBasicBlock *bblock, unsigned char *ip)
6597 	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
6598 	MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6600 		emit_throw_exception (cfg, ex);
6604 * Check that the IL instructions at ip are the array initialization
6605 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the "newarr; dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" CIL sequence starting at IP.  On a
 * match, return a pointer to the static field's raw data (or, for AOT, the
 * RVA wrapped with GUINT_TO_POINTER) and store the element size in
 * *OUT_SIZE and the field token in *OUT_FIELD_TOKEN, so the call can be
 * replaced by a direct memory copy.
 */
6608 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6611 	 * newarr[System.Int32]
6613 	 * ldtoken field valuetype ...
6614 	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	/* ip [5] == 0x4 is the Field token-type byte of the ldtoken operand */
6616 	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6618 		guint32 token = read32 (ip + 7);
6619 		guint32 field_token = read32 (ip + 2);
6620 		guint32 field_index = field_token & 0xffffff;
6622 		const char *data_ptr;
6624 		MonoMethod *cmethod;
6625 		MonoClass *dummy_class;
6626 		MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6630 			mono_error_cleanup (&error); /* FIXME don't swallow the error */
6634 		*out_field_token = field_token;
6636 		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* bail out unless the callee really is corlib's RuntimeHelpers.InitializeArray */
6639 		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6641 		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6642 		case MONO_TYPE_BOOLEAN:
6646 			/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6647 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6648 		case MONO_TYPE_CHAR:
		/* refuse if the array needs more bytes than the RVA field provides */
6665 		if (size > mono_type_size (field->type, &dummy_align))
6668 		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6669 		if (!image_is_dynamic (method->klass->image)) {
6670 			field_index = read32 (ip + 2) & 0xffffff;
6671 			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6672 			data_ptr = mono_image_rva_map (method->klass->image, rva);
6673 			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6674 			/* for aot code we do the lookup on load */
6675 			if (aot && data_ptr)
6676 				return GUINT_TO_POINTER (rva);
6678 			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6680 			data_ptr = mono_field_get_data (field);
/* Fail the compile with an InvalidProgramException whose message names METHOD
 * and quotes a disassembly of the offending IL instruction at IP. */
6688 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6690 char *method_fname = mono_method_full_name (method, TRUE);
6692 MonoMethodHeader *header = mono_method_get_header (method);
6694 if (header->code_size == 0)
6695 method_code = g_strdup ("method body is empty.");
6697 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6698 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6699 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6700 g_free (method_fname);
6701 g_free (method_code);
/* defer freeing the header: it is queued on the cfg and released with it */
6702 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/* Fail the compile by attaching a pre-built managed exception object to CFG. */
6706 set_exception_object (MonoCompile *cfg, MonoException *exception)
6708 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
/* exception_ptr holds a managed object: register it as a GC root so it is
 * not collected while the cfg is alive */
6709 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6710 cfg->exception_ptr = exception;
/* Store the top-of-stack value *SP into local N.  When the value is a plain
 * constant load that is still the last instruction of the current bblock,
 * fold the store away by retargeting the constant's destination register. */
6714 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6717 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6718 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6719 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6720 /* Optimize reg-reg moves away */
6722 * Can't optimize other opcodes, since sp[0] might point to
6723 * the last ins of a decomposed opcode.
6725 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* general case: emit an explicit local store */
6727 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6732 * ldloca inhibits many optimizations so try to get rid of it in common
6735 static inline unsigned char *
6736 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6746 local = read16 (ip + 2);
	/* Peephole: "ldloca <n>; initobj <type>" is just initializing the local —
	 * emit the init directly so the address of the local is never taken. */
6750 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6751 /* From the INITOBJ case */
6752 token = read32 (ip + 2);
6753 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6754 CHECK_TYPELOAD (klass);
6755 type = mini_replace_type (&klass->byval_arg);
6756 emit_init_local (cfg, local, type, TRUE);
/* Return whether CLASS is System.Exception or derives from it. */
6764 is_exception_class (MonoClass *class)
6767 if (class == mono_defaults.exception_class)
/* walk up the inheritance chain */
6769 class = class->parent;
6775 * is_jit_optimizer_disabled:
6777 * Determine whenever M's assembly has a DebuggableAttribute with the
6778 * IsJITOptimizerDisabled flag set.
6781 is_jit_optimizer_disabled (MonoMethod *m)
6783 MonoAssembly *ass = m->klass->image->assembly;
6784 MonoCustomAttrInfo* attrs;
6785 static MonoClass *klass;
6787 gboolean val = FALSE;
/* the answer is cached per-assembly; the barriers below publish the value
 * before the inited flag so lock-free readers always see a consistent pair */
6790 if (ass->jit_optimizer_disabled_inited)
6791 return ass->jit_optimizer_disabled;
6794 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* publish the default (FALSE) result */
6797 ass->jit_optimizer_disabled = FALSE;
6798 mono_memory_barrier ();
6799 ass->jit_optimizer_disabled_inited = TRUE;
6803 attrs = mono_custom_attrs_from_assembly (ass);
6805 for (i = 0; i < attrs->num_attrs; ++i) {
6806 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6809 MonoMethodSignature *sig;
/* skip attributes that aren't DebuggableAttribute */
6811 if (!attr->ctor || attr->ctor->klass != klass)
6813 /* Decode the attribute. See reflection.c */
6814 len = attr->data_size;
6815 p = (const char*)attr->data;
/* custom attribute value blobs start with the 0x0001 prolog (ECMA-335) */
6816 g_assert (read16 (p) == 0x0001);
6819 // FIXME: Support named parameters
6820 sig = mono_method_signature (attr->ctor);
/* only the DebuggableAttribute (bool, bool) ctor is handled here */
6821 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6823 /* Two boolean arguments */
6827 mono_custom_attrs_free (attrs);
6830 ass->jit_optimizer_disabled = val;
6831 mono_memory_barrier ();
6832 ass->jit_optimizer_disabled_inited = TRUE;
/* Decide whether the call site CALL_OPCODE from METHOD to CMETHOD (call
 * signature FSIG) may be compiled as a tail call on this configuration. */
6838 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6840 gboolean supported_tail_call;
6843 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
/* let the backend judge from the caller/callee signatures */
6844 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* fallback: only identical signatures with a non-struct return are safe */
6846 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6849 for (i = 0; i < fsig->param_count; ++i) {
6850 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6851 /* These can point to the current method's stack */
6852 supported_tail_call = FALSE;
6854 if (fsig->hasthis && cmethod->klass->valuetype)
6855 /* this might point to the current method's stack */
6856 supported_tail_call = FALSE;
6857 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6858 supported_tail_call = FALSE;
/* the LMF would be popped too early if we tail-called out of this frame */
6859 if (cfg->method->save_lmf)
6860 supported_tail_call = FALSE;
6861 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6862 supported_tail_call = FALSE;
/* only plain CEE_CALL sites are supported (not callvirt/calli) */
6863 if (call_opcode != CEE_CALL)
6864 supported_tail_call = FALSE;
6866 /* Debugging support */
/* allow bisecting tail-call problems via the mono_debug_count () counter */
6868 if (supported_tail_call) {
6869 if (!mono_debug_count ())
6870 supported_tail_call = FALSE;
6874 return supported_tail_call;
6877 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6878 * it to the thread local value based on the tls_offset field. Every other kind of access to
6879 * the field causes an assert.
6882 is_magic_tls_access (MonoClassField *field)
/* strcmp () != 0 means "name doesn't match" — bail out */
6884 if (strcmp (field->name, "tlsdata"))
6886 if (strcmp (field->parent->name, "ThreadLocal`1"))
/* only corlib's ThreadLocal<T> gets the magic treatment */
6888 return field->parent->image == mono_defaults.corlib;
6891 /* emits the code needed to access a managed tls var (like ThreadStatic)
6892 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6893 * pointer for the current thread.
6894 * Returns the MonoInst* representing the address of the tls var.
6897 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6900 int static_data_reg, array_reg, dreg;
6901 int offset2_reg, idx_reg;
6902 // inlined access to the tls data
6903 // idx = (offset >> 24) - 1;
6904 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6905 static_data_reg = alloc_ireg (cfg);
6906 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1: the chunk index lives in the top byte */
6907 idx_reg = alloc_ireg (cfg);
6908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* scale the index by the pointer size to address static_data [idx] */
6910 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6911 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array = thread->static_data [idx] */
6912 array_reg = alloc_ireg (cfg);
6913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2 = offset & 0xffffff: the byte offset inside the chunk */
6914 offset2_reg = alloc_ireg (cfg);
6915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6916 dreg = alloc_ireg (cfg);
6917 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6922 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6923 * this address is cached per-method in cached_tls_addr.
6926 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6928 MonoInst *load, *addr, *temp, *store, *thread_ins;
6929 MonoClassField *offset_field;
/* reuse the per-method cached address if it was already computed */
6931 if (*cached_tls_addr) {
6932 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6935 thread_ins = mono_get_thread_intrinsic (cfg);
6936 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* load the tls offset out of the ThreadLocal<T> instance */
6938 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6940 MONO_ADD_INS (cfg->cbb, thread_ins);
/* no thread intrinsic on this arch: call CurrentInternalThread_internal */
6942 MonoMethod *thread_method;
6943 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6944 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6946 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
/* the result is a managed pointer to the tls value */
6947 addr->klass = mono_class_from_mono_type (tls_field->type);
6948 addr->type = STACK_MP;
/* stash the address in a temp so later accesses in this method reuse it */
6949 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6950 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6952 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6959 * Handle calls made to ctors from NEWOBJ opcodes.
6961 * REF_BBLOCK will point to the current bblock after the call.
6964 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
6965 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
6967 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
6968 MonoBasicBlock *bblock = *ref_bblock;
/* shared generic valuetype ctors take an extra rgctx/vtable argument */
6970 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
6971 mono_method_is_generic_sharable (cmethod, TRUE)) {
6972 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
/* force the vtable to be created and pass the per-method rgctx */
6973 mono_class_vtable (cfg->domain, cmethod->klass);
6974 CHECK_TYPELOAD (cmethod->klass);
6976 vtable_arg = emit_get_rgctx_method (cfg, context_used,
6977 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6980 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
6981 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* non-shared case: the vtable is known at compile time */
6983 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6985 CHECK_TYPELOAD (cmethod->klass);
6986 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6991 /* Avoid virtual calls to ctors if possible */
6992 if (mono_class_is_marshalbyref (cmethod->klass))
6993 callvirt_this_arg = sp [0];
/* 1) intrinsic implementation of the ctor, if the backend has one */
6995 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
6996 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
6997 CHECK_CFG_EXCEPTION;
/* 2) try inlining the ctor body (never for Exception subclasses) */
6998 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
6999 mono_method_check_inlining (cfg, cmethod) &&
7000 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7003 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
/* account for the 5-byte call instruction we replaced */
7004 cfg->real_offset += 5;
7006 *inline_costs += costs - 5;
7007 *ref_bblock = bblock;
7009 INLINE_FAILURE ("inline failure");
7010 // FIXME-VT: Clean this up
7011 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7012 GSHAREDVT_FAILURE(*ip);
7013 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* 3) gsharedvt signature: call through the gsharedvt-out trampoline */
7015 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7018 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7019 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
/* 4) context-dependent target that cannot be patched: indirect call */
7020 } else if (context_used &&
7021 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7022 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7023 MonoInst *cmethod_addr;
7025 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7027 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7028 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7030 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* 5) plain direct call to the ctor */
7032 INLINE_FAILURE ("ctor call");
7033 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7034 callvirt_this_arg, NULL, vtable_arg);
7041 * mono_method_to_ir:
7043 * Translate the .net IL into linear IR.
7046 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7047 MonoInst *return_var, MonoInst **inline_args,
7048 guint inline_offset, gboolean is_virtual_call)
7051 MonoInst *ins, **sp, **stack_start;
7052 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7053 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7054 MonoMethod *cmethod, *method_definition;
7055 MonoInst **arg_array;
7056 MonoMethodHeader *header;
7058 guint32 token, ins_flag;
7060 MonoClass *constrained_call = NULL;
7061 unsigned char *ip, *end, *target, *err_pos;
7062 MonoMethodSignature *sig;
7063 MonoGenericContext *generic_context = NULL;
7064 MonoGenericContainer *generic_container = NULL;
7065 MonoType **param_types;
7066 int i, n, start_new_bblock, dreg;
7067 int num_calls = 0, inline_costs = 0;
7068 int breakpoint_id = 0;
7070 MonoBoolean security, pinvoke;
7071 MonoSecurityManager* secman = NULL;
7072 MonoDeclSecurityActions actions;
7073 GSList *class_inits = NULL;
7074 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7076 gboolean init_locals, seq_points, skip_dead_blocks;
7077 gboolean sym_seq_points = FALSE;
7078 MonoInst *cached_tls_addr = NULL;
7079 MonoDebugMethodInfo *minfo;
7080 MonoBitSet *seq_point_locs = NULL;
7081 MonoBitSet *seq_point_set_locs = NULL;
7083 cfg->disable_inline = is_jit_optimizer_disabled (method);
7085 /* serialization and xdomain stuff may need access to private fields and methods */
7086 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7087 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7088 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7089 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7090 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7091 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7093 dont_verify |= mono_security_smcs_hack_enabled ();
7095 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7096 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7097 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7098 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7099 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7101 image = method->klass->image;
7102 header = mono_method_get_header (method);
7104 MonoLoaderError *error;
7106 if ((error = mono_loader_get_last_error ())) {
7107 mono_cfg_set_exception (cfg, error->exception_type);
7109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7110 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7112 goto exception_exit;
7114 generic_container = mono_method_get_generic_container (method);
7115 sig = mono_method_signature (method);
7116 num_args = sig->hasthis + sig->param_count;
7117 ip = (unsigned char*)header->code;
7118 cfg->cil_start = ip;
7119 end = ip + header->code_size;
7120 cfg->stat_cil_code_size += header->code_size;
7122 seq_points = cfg->gen_seq_points && cfg->method == method;
7123 #ifdef PLATFORM_ANDROID
7124 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7127 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7128 /* We could hit a seq point before attaching to the JIT (#8338) */
7132 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7133 minfo = mono_debug_lookup_method (method);
7135 int i, n_il_offsets;
7139 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7140 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7141 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7142 sym_seq_points = TRUE;
7143 for (i = 0; i < n_il_offsets; ++i) {
7144 if (il_offsets [i] < header->code_size)
7145 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7147 g_free (il_offsets);
7148 g_free (line_numbers);
7149 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7150 /* Methods without line number info like auto-generated property accessors */
7151 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7152 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7153 sym_seq_points = TRUE;
7158 * Methods without init_locals set could cause asserts in various passes
7159 * (#497220). To work around this, we emit dummy initialization opcodes
7160 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7161 * on some platforms.
7163 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7164 init_locals = header->init_locals;
7168 method_definition = method;
7169 while (method_definition->is_inflated) {
7170 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7171 method_definition = imethod->declaring;
7174 /* SkipVerification is not allowed if core-clr is enabled */
7175 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7177 dont_verify_stloc = TRUE;
7180 if (sig->is_inflated)
7181 generic_context = mono_method_get_context (method);
7182 else if (generic_container)
7183 generic_context = &generic_container->context;
7184 cfg->generic_context = generic_context;
7186 if (!cfg->generic_sharing_context)
7187 g_assert (!sig->has_type_parameters);
7189 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7190 g_assert (method->is_inflated);
7191 g_assert (mono_method_get_context (method)->method_inst);
7193 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7194 g_assert (sig->generic_param_count);
7196 if (cfg->method == method) {
7197 cfg->real_offset = 0;
7199 cfg->real_offset = inline_offset;
7202 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7203 cfg->cil_offset_to_bb_len = header->code_size;
7205 cfg->current_method = method;
7207 if (cfg->verbose_level > 2)
7208 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7210 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7212 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7213 for (n = 0; n < sig->param_count; ++n)
7214 param_types [n + sig->hasthis] = sig->params [n];
7215 cfg->arg_types = param_types;
7217 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7218 if (cfg->method == method) {
7220 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7221 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7224 NEW_BBLOCK (cfg, start_bblock);
7225 cfg->bb_entry = start_bblock;
7226 start_bblock->cil_code = NULL;
7227 start_bblock->cil_length = 0;
7228 #if defined(__native_client_codegen__)
7229 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7230 ins->dreg = alloc_dreg (cfg, STACK_I4);
7231 MONO_ADD_INS (start_bblock, ins);
7235 NEW_BBLOCK (cfg, end_bblock);
7236 cfg->bb_exit = end_bblock;
7237 end_bblock->cil_code = NULL;
7238 end_bblock->cil_length = 0;
7239 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7240 g_assert (cfg->num_bblocks == 2);
7242 arg_array = cfg->args;
7244 if (header->num_clauses) {
7245 cfg->spvars = g_hash_table_new (NULL, NULL);
7246 cfg->exvars = g_hash_table_new (NULL, NULL);
7248 /* handle exception clauses */
7249 for (i = 0; i < header->num_clauses; ++i) {
7250 MonoBasicBlock *try_bb;
7251 MonoExceptionClause *clause = &header->clauses [i];
7252 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7253 try_bb->real_offset = clause->try_offset;
7254 try_bb->try_start = TRUE;
7255 try_bb->region = ((i + 1) << 8) | clause->flags;
7256 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7257 tblock->real_offset = clause->handler_offset;
7258 tblock->flags |= BB_EXCEPTION_HANDLER;
7261 * Linking the try block with the EH block hinders inlining as we won't be able to
7262 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7264 if (COMPILE_LLVM (cfg))
7265 link_bblock (cfg, try_bb, tblock);
7267 if (*(ip + clause->handler_offset) == CEE_POP)
7268 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7270 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7271 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7272 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7273 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7274 MONO_ADD_INS (tblock, ins);
7276 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7277 /* finally clauses already have a seq point */
7278 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7279 MONO_ADD_INS (tblock, ins);
7282 /* todo: is a fault block unsafe to optimize? */
7283 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7284 tblock->flags |= BB_EXCEPTION_UNSAFE;
7288 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7290 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7292 /* catch and filter blocks get the exception object on the stack */
7293 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7294 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7295 MonoInst *dummy_use;
7297 /* mostly like handle_stack_args (), but just sets the input args */
7298 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7299 tblock->in_scount = 1;
7300 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7301 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7304 * Add a dummy use for the exvar so its liveness info will be
7308 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7310 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7311 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7312 tblock->flags |= BB_EXCEPTION_HANDLER;
7313 tblock->real_offset = clause->data.filter_offset;
7314 tblock->in_scount = 1;
7315 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7316 /* The filter block shares the exvar with the handler block */
7317 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7318 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7319 MONO_ADD_INS (tblock, ins);
7323 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7324 clause->data.catch_class &&
7325 cfg->generic_sharing_context &&
7326 mono_class_check_context_used (clause->data.catch_class)) {
7328 * In shared generic code with catch
7329 * clauses containing type variables
7330 * the exception handling code has to
7331 * be able to get to the rgctx.
7332 * Therefore we have to make sure that
7333 * the vtable/mrgctx argument (for
7334 * static or generic methods) or the
7335 * "this" argument (for non-static
7336 * methods) are live.
7338 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7339 mini_method_get_context (method)->method_inst ||
7340 method->klass->valuetype) {
7341 mono_get_vtable_var (cfg);
7343 MonoInst *dummy_use;
7345 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7350 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7351 cfg->cbb = start_bblock;
7352 cfg->args = arg_array;
7353 mono_save_args (cfg, sig, inline_args);
7356 /* FIRST CODE BLOCK */
7357 NEW_BBLOCK (cfg, bblock);
7358 bblock->cil_code = ip;
7362 ADD_BBLOCK (cfg, bblock);
7364 if (cfg->method == method) {
7365 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7366 if (breakpoint_id) {
7367 MONO_INST_NEW (cfg, ins, OP_BREAK);
7368 MONO_ADD_INS (bblock, ins);
7372 if (mono_security_cas_enabled ())
7373 secman = mono_security_manager_get_methods ();
7375 security = (secman && mono_security_method_has_declsec (method));
7376 /* at this point having security doesn't mean we have any code to generate */
7377 if (security && (cfg->method == method)) {
7378 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7379 * And we do not want to enter the next section (with allocation) if we
7380 * have nothing to generate */
7381 security = mono_declsec_get_demands (method, &actions);
7384 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7385 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7387 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7388 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7389 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7391 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7392 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7396 mono_custom_attrs_free (custom);
7399 custom = mono_custom_attrs_from_class (wrapped->klass);
7400 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7404 mono_custom_attrs_free (custom);
7407 /* not a P/Invoke after all */
7412 /* we use a separate basic block for the initialization code */
7413 NEW_BBLOCK (cfg, init_localsbb);
7414 cfg->bb_init = init_localsbb;
7415 init_localsbb->real_offset = cfg->real_offset;
7416 start_bblock->next_bb = init_localsbb;
7417 init_localsbb->next_bb = bblock;
7418 link_bblock (cfg, start_bblock, init_localsbb);
7419 link_bblock (cfg, init_localsbb, bblock);
7421 cfg->cbb = init_localsbb;
7423 if (cfg->gsharedvt && cfg->method == method) {
7424 MonoGSharedVtMethodInfo *info;
7425 MonoInst *var, *locals_var;
7428 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7429 info->method = cfg->method;
7430 info->count_entries = 16;
7431 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7432 cfg->gsharedvt_info = info;
7434 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7435 /* prevent it from being register allocated */
7436 //var->flags |= MONO_INST_VOLATILE;
7437 cfg->gsharedvt_info_var = var;
7439 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7440 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7442 /* Allocate locals */
7443 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7444 /* prevent it from being register allocated */
7445 //locals_var->flags |= MONO_INST_VOLATILE;
7446 cfg->gsharedvt_locals_var = locals_var;
7448 dreg = alloc_ireg (cfg);
7449 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7451 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7452 ins->dreg = locals_var->dreg;
7454 MONO_ADD_INS (cfg->cbb, ins);
7455 cfg->gsharedvt_locals_var_ins = ins;
7457 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7460 ins->flags |= MONO_INST_INIT;
7464 /* at this point we know, if security is TRUE, that some code needs to be generated */
7465 if (security && (cfg->method == method)) {
7468 cfg->stat_cas_demand_generation++;
7470 if (actions.demand.blob) {
7471 /* Add code for SecurityAction.Demand */
7472 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7473 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7474 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7475 mono_emit_method_call (cfg, secman->demand, args, NULL);
7477 if (actions.noncasdemand.blob) {
7478 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7479 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7480 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7481 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7482 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7483 mono_emit_method_call (cfg, secman->demand, args, NULL);
7485 if (actions.demandchoice.blob) {
7486 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7487 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7488 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7489 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7490 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7494 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7496 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7499 if (mono_security_core_clr_enabled ()) {
7500 /* check if this is native code, e.g. an icall or a p/invoke */
7501 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7502 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7504 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7505 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7507 /* if this ia a native call then it can only be JITted from platform code */
7508 if ((icall || pinvk) && method->klass && method->klass->image) {
7509 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7510 MonoException *ex = icall ? mono_get_exception_security () :
7511 mono_get_exception_method_access ();
7512 emit_throw_exception (cfg, ex);
7519 CHECK_CFG_EXCEPTION;
7521 if (header->code_size == 0)
7524 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7529 if (cfg->method == method)
7530 mono_debug_init_method (cfg, bblock, breakpoint_id);
7532 for (n = 0; n < header->num_locals; ++n) {
7533 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7538 /* We force the vtable variable here for all shared methods
7539 for the possibility that they might show up in a stack
7540 trace where their exact instantiation is needed. */
7541 if (cfg->generic_sharing_context && method == cfg->method) {
7542 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7543 mini_method_get_context (method)->method_inst ||
7544 method->klass->valuetype) {
7545 mono_get_vtable_var (cfg);
7547 /* FIXME: Is there a better way to do this?
7548 We need the variable live for the duration
7549 of the whole method. */
7550 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7554 /* add a check for this != NULL to inlined methods */
7555 if (is_virtual_call) {
7558 NEW_ARGLOAD (cfg, arg_ins, 0);
7559 MONO_ADD_INS (cfg->cbb, arg_ins);
7560 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7563 skip_dead_blocks = !dont_verify;
7564 if (skip_dead_blocks) {
7565 original_bb = bb = mono_basic_block_split (method, &cfg->error);
7570 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7571 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7574 start_new_bblock = 0;
7577 if (cfg->method == method)
7578 cfg->real_offset = ip - header->code;
7580 cfg->real_offset = inline_offset;
7585 if (start_new_bblock) {
7586 bblock->cil_length = ip - bblock->cil_code;
7587 if (start_new_bblock == 2) {
7588 g_assert (ip == tblock->cil_code);
7590 GET_BBLOCK (cfg, tblock, ip);
7592 bblock->next_bb = tblock;
7595 start_new_bblock = 0;
7596 for (i = 0; i < bblock->in_scount; ++i) {
7597 if (cfg->verbose_level > 3)
7598 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7599 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7603 g_slist_free (class_inits);
7606 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7607 link_bblock (cfg, bblock, tblock);
7608 if (sp != stack_start) {
7609 handle_stack_args (cfg, stack_start, sp - stack_start);
7611 CHECK_UNVERIFIABLE (cfg);
7613 bblock->next_bb = tblock;
7616 for (i = 0; i < bblock->in_scount; ++i) {
7617 if (cfg->verbose_level > 3)
7618 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7619 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7622 g_slist_free (class_inits);
7627 if (skip_dead_blocks) {
7628 int ip_offset = ip - header->code;
7630 if (ip_offset == bb->end)
7634 int op_size = mono_opcode_size (ip, end);
7635 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7637 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7639 if (ip_offset + op_size == bb->end) {
7640 MONO_INST_NEW (cfg, ins, OP_NOP);
7641 MONO_ADD_INS (bblock, ins);
7642 start_new_bblock = 1;
7650 * Sequence points are points where the debugger can place a breakpoint.
7651 * Currently, we generate these automatically at points where the IL
7654 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7656 				 * Make methods interruptible at the beginning, and at the targets of
7657 * backward branches.
7658 * Also, do this at the start of every bblock in methods with clauses too,
7659 				 * to be able to handle instructions with imprecise control flow like
7661 * Backward branches are handled at the end of method-to-ir ().
7663 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7665 /* Avoid sequence points on empty IL like .volatile */
7666 // FIXME: Enable this
7667 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7668 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7669 if (sp != stack_start)
7670 ins->flags |= MONO_INST_NONEMPTY_STACK;
7671 MONO_ADD_INS (cfg->cbb, ins);
7674 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7677 bblock->real_offset = cfg->real_offset;
7679 if ((cfg->method == method) && cfg->coverage_info) {
7680 guint32 cil_offset = ip - header->code;
7681 cfg->coverage_info->data [cil_offset].cil_code = ip;
7683 /* TODO: Use an increment here */
7684 #if defined(TARGET_X86)
7685 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7686 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7688 MONO_ADD_INS (cfg->cbb, ins);
7690 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7691 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7695 if (cfg->verbose_level > 3)
7696 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7700 if (seq_points && !sym_seq_points && sp != stack_start) {
7702 * The C# compiler uses these nops to notify the JIT that it should
7703 * insert seq points.
7705 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7706 MONO_ADD_INS (cfg->cbb, ins);
7708 if (cfg->keep_cil_nops)
7709 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7711 MONO_INST_NEW (cfg, ins, OP_NOP);
7713 MONO_ADD_INS (bblock, ins);
7716 if (should_insert_brekpoint (cfg->method)) {
7717 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7719 MONO_INST_NEW (cfg, ins, OP_NOP);
7722 MONO_ADD_INS (bblock, ins);
7728 CHECK_STACK_OVF (1);
7729 n = (*ip)-CEE_LDARG_0;
7731 EMIT_NEW_ARGLOAD (cfg, ins, n);
7739 CHECK_STACK_OVF (1);
7740 n = (*ip)-CEE_LDLOC_0;
7742 EMIT_NEW_LOCLOAD (cfg, ins, n);
7751 n = (*ip)-CEE_STLOC_0;
7754 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7756 emit_stloc_ir (cfg, sp, header, n);
7763 CHECK_STACK_OVF (1);
7766 EMIT_NEW_ARGLOAD (cfg, ins, n);
7772 CHECK_STACK_OVF (1);
7775 NEW_ARGLOADA (cfg, ins, n);
7776 MONO_ADD_INS (cfg->cbb, ins);
7786 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7788 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7793 CHECK_STACK_OVF (1);
7796 EMIT_NEW_LOCLOAD (cfg, ins, n);
7800 case CEE_LDLOCA_S: {
7801 unsigned char *tmp_ip;
7803 CHECK_STACK_OVF (1);
7804 CHECK_LOCAL (ip [1]);
7806 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7812 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7821 CHECK_LOCAL (ip [1]);
7822 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7824 emit_stloc_ir (cfg, sp, header, ip [1]);
7829 CHECK_STACK_OVF (1);
7830 EMIT_NEW_PCONST (cfg, ins, NULL);
7831 ins->type = STACK_OBJ;
7836 CHECK_STACK_OVF (1);
7837 EMIT_NEW_ICONST (cfg, ins, -1);
7850 CHECK_STACK_OVF (1);
7851 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7857 CHECK_STACK_OVF (1);
7859 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7865 CHECK_STACK_OVF (1);
7866 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7872 CHECK_STACK_OVF (1);
7873 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7874 ins->type = STACK_I8;
7875 ins->dreg = alloc_dreg (cfg, STACK_I8);
7877 ins->inst_l = (gint64)read64 (ip);
7878 MONO_ADD_INS (bblock, ins);
7884 gboolean use_aotconst = FALSE;
7886 #ifdef TARGET_POWERPC
7887 /* FIXME: Clean this up */
7888 if (cfg->compile_aot)
7889 use_aotconst = TRUE;
7892 /* FIXME: we should really allocate this only late in the compilation process */
7893 f = mono_domain_alloc (cfg->domain, sizeof (float));
7895 CHECK_STACK_OVF (1);
7901 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7903 dreg = alloc_freg (cfg);
7904 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7905 ins->type = STACK_R8;
7907 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7908 ins->type = STACK_R8;
7909 ins->dreg = alloc_dreg (cfg, STACK_R8);
7911 MONO_ADD_INS (bblock, ins);
7921 gboolean use_aotconst = FALSE;
7923 #ifdef TARGET_POWERPC
7924 /* FIXME: Clean this up */
7925 if (cfg->compile_aot)
7926 use_aotconst = TRUE;
7929 /* FIXME: we should really allocate this only late in the compilation process */
7930 d = mono_domain_alloc (cfg->domain, sizeof (double));
7932 CHECK_STACK_OVF (1);
7938 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7940 dreg = alloc_freg (cfg);
7941 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7942 ins->type = STACK_R8;
7944 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7945 ins->type = STACK_R8;
7946 ins->dreg = alloc_dreg (cfg, STACK_R8);
7948 MONO_ADD_INS (bblock, ins);
7957 MonoInst *temp, *store;
7959 CHECK_STACK_OVF (1);
7963 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7964 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7966 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7969 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7982 if (sp [0]->type == STACK_R8)
7983 /* we need to pop the value from the x86 FP stack */
7984 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7990 INLINE_FAILURE ("jmp");
7991 GSHAREDVT_FAILURE (*ip);
7994 if (stack_start != sp)
7996 token = read32 (ip + 1);
7997 /* FIXME: check the signature matches */
7998 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8000 if (!cmethod || mono_loader_get_last_error ())
8003 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8004 GENERIC_SHARING_FAILURE (CEE_JMP);
8006 if (mono_security_cas_enabled ())
8007 CHECK_CFG_EXCEPTION;
8009 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8011 if (ARCH_HAVE_OP_TAIL_CALL) {
8012 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8015 /* Handle tail calls similarly to calls */
8016 n = fsig->param_count + fsig->hasthis;
8020 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8021 call->method = cmethod;
8022 call->tail_call = TRUE;
8023 call->signature = mono_method_signature (cmethod);
8024 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8025 call->inst.inst_p0 = cmethod;
8026 for (i = 0; i < n; ++i)
8027 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8029 mono_arch_emit_call (cfg, call);
8030 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8031 MONO_ADD_INS (bblock, (MonoInst*)call);
8033 for (i = 0; i < num_args; ++i)
8034 /* Prevent arguments from being optimized away */
8035 arg_array [i]->flags |= MONO_INST_VOLATILE;
8037 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8038 ins = (MonoInst*)call;
8039 ins->inst_p0 = cmethod;
8040 MONO_ADD_INS (bblock, ins);
8044 start_new_bblock = 1;
8049 case CEE_CALLVIRT: {
8050 MonoInst *addr = NULL;
8051 MonoMethodSignature *fsig = NULL;
8053 int virtual = *ip == CEE_CALLVIRT;
8054 int calli = *ip == CEE_CALLI;
8055 gboolean pass_imt_from_rgctx = FALSE;
8056 MonoInst *imt_arg = NULL;
8057 MonoInst *keep_this_alive = NULL;
8058 gboolean pass_vtable = FALSE;
8059 gboolean pass_mrgctx = FALSE;
8060 MonoInst *vtable_arg = NULL;
8061 gboolean check_this = FALSE;
8062 gboolean supported_tail_call = FALSE;
8063 gboolean tail_call = FALSE;
8064 gboolean need_seq_point = FALSE;
8065 guint32 call_opcode = *ip;
8066 gboolean emit_widen = TRUE;
8067 gboolean push_res = TRUE;
8068 gboolean skip_ret = FALSE;
8069 gboolean delegate_invoke = FALSE;
8072 token = read32 (ip + 1);
8077 //GSHAREDVT_FAILURE (*ip);
8082 fsig = mini_get_signature (method, token, generic_context);
8083 n = fsig->param_count + fsig->hasthis;
8085 if (method->dynamic && fsig->pinvoke) {
8089 * This is a call through a function pointer using a pinvoke
8090 * signature. Have to create a wrapper and call that instead.
8091 * FIXME: This is very slow, need to create a wrapper at JIT time
8092 * instead based on the signature.
8094 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8095 EMIT_NEW_PCONST (cfg, args [1], fsig);
8097 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8100 MonoMethod *cil_method;
8102 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8103 cil_method = cmethod;
8105 if (constrained_call) {
8106 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8107 if (cfg->verbose_level > 2)
8108 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8109 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8110 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8111 cfg->generic_sharing_context)) {
8112 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context, &cfg->error);
8116 if (cfg->verbose_level > 2)
8117 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8119 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8121 * This is needed since get_method_constrained can't find
8122 * the method in klass representing a type var.
8123 * The type var is guaranteed to be a reference type in this
8126 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8127 g_assert (!cmethod->klass->valuetype);
8129 cmethod = mono_get_method_constrained_checked (image, token, constrained_call, generic_context, &cil_method, &cfg->error);
8135 if (!cmethod || mono_loader_get_last_error ())
8137 if (!dont_verify && !cfg->skip_visibility) {
8138 MonoMethod *target_method = cil_method;
8139 if (method->is_inflated) {
8140 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8142 if (!mono_method_can_access_method (method_definition, target_method) &&
8143 !mono_method_can_access_method (method, cil_method))
8144 METHOD_ACCESS_FAILURE (method, cil_method);
8147 if (mono_security_core_clr_enabled ())
8148 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8150 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8151 /* MS.NET seems to silently convert this to a callvirt */
8156 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8157 * converts to a callvirt.
8159 * tests/bug-515884.il is an example of this behavior
8161 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8162 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8163 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8167 if (!cmethod->klass->inited)
8168 if (!mono_class_init (cmethod->klass))
8169 TYPE_LOAD_ERROR (cmethod->klass);
8171 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8172 mini_class_is_system_array (cmethod->klass)) {
8173 array_rank = cmethod->klass->rank;
8174 fsig = mono_method_signature (cmethod);
8176 fsig = mono_method_signature (cmethod);
8181 if (fsig->pinvoke) {
8182 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8183 check_for_pending_exc, cfg->compile_aot);
8184 fsig = mono_method_signature (wrapper);
8185 } else if (constrained_call) {
8186 fsig = mono_method_signature (cmethod);
8188 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8193 mono_save_token_info (cfg, image, token, cil_method);
8195 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8196 need_seq_point = TRUE;
8198 n = fsig->param_count + fsig->hasthis;
8200 /* Don't support calls made using type arguments for now */
8202 if (cfg->gsharedvt) {
8203 if (mini_is_gsharedvt_signature (cfg, fsig))
8204 GSHAREDVT_FAILURE (*ip);
8208 if (mono_security_cas_enabled ()) {
8209 if (check_linkdemand (cfg, method, cmethod))
8210 INLINE_FAILURE ("linkdemand");
8211 CHECK_CFG_EXCEPTION;
8214 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8215 g_assert_not_reached ();
8218 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8221 if (!cfg->generic_sharing_context && cmethod)
8222 g_assert (!mono_method_check_context_used (cmethod));
8226 //g_assert (!virtual || fsig->hasthis);
8230 if (constrained_call) {
8231 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8233 				 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
8235 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8236 /* The 'Own method' case below */
8237 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8238 /* 'The type parameter is instantiated as a reference type' case below. */
8239 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8240 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8241 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8242 MonoInst *args [16];
8245 * This case handles calls to
8246 * - object:ToString()/Equals()/GetHashCode(),
8247 * - System.IComparable<T>:CompareTo()
8248 * - System.IEquatable<T>:Equals ()
8249 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8253 if (mono_method_check_context_used (cmethod))
8254 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8256 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8257 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8259 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8260 if (fsig->hasthis && fsig->param_count) {
8261 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8262 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8263 ins->dreg = alloc_preg (cfg);
8264 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8265 MONO_ADD_INS (cfg->cbb, ins);
8268 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8271 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8273 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8274 addr_reg = ins->dreg;
8275 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8277 EMIT_NEW_ICONST (cfg, args [3], 0);
8278 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8281 EMIT_NEW_ICONST (cfg, args [3], 0);
8282 EMIT_NEW_ICONST (cfg, args [4], 0);
8284 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8287 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8288 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8289 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
8293 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8294 MONO_ADD_INS (cfg->cbb, add);
8296 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8297 MONO_ADD_INS (cfg->cbb, ins);
8298 /* ins represents the call result */
8303 GSHAREDVT_FAILURE (*ip);
8307 * We have the `constrained.' prefix opcode.
8309 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8311 * The type parameter is instantiated as a valuetype,
8312 * but that type doesn't override the method we're
8313 * calling, so we need to box `this'.
8315 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8316 ins->klass = constrained_call;
8317 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8318 CHECK_CFG_EXCEPTION;
8319 } else if (!constrained_call->valuetype) {
8320 int dreg = alloc_ireg_ref (cfg);
8323 * The type parameter is instantiated as a reference
8324 * type. We have a managed pointer on the stack, so
8325 * we need to dereference it here.
8327 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8328 ins->type = STACK_OBJ;
8331 if (cmethod->klass->valuetype) {
8334 /* Interface method */
8337 mono_class_setup_vtable (constrained_call);
8338 CHECK_TYPELOAD (constrained_call);
8339 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8341 TYPE_LOAD_ERROR (constrained_call);
8342 slot = mono_method_get_vtable_slot (cmethod);
8344 TYPE_LOAD_ERROR (cmethod->klass);
8345 cmethod = constrained_call->vtable [ioffset + slot];
8347 if (cmethod->klass == mono_defaults.enum_class) {
8348 /* Enum implements some interfaces, so treat this as the first case */
8349 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8350 ins->klass = constrained_call;
8351 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8352 CHECK_CFG_EXCEPTION;
8357 constrained_call = NULL;
8360 if (!calli && check_call_signature (cfg, fsig, sp))
8363 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8364 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8365 delegate_invoke = TRUE;
8368 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8370 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8371 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8379 * If the callee is a shared method, then its static cctor
8380 * might not get called after the call was patched.
8382 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8383 emit_generic_class_init (cfg, cmethod->klass);
8384 CHECK_TYPELOAD (cmethod->klass);
8388 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8390 if (cfg->generic_sharing_context && cmethod) {
8391 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8393 context_used = mini_method_check_context_used (cfg, cmethod);
8395 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8396 /* Generic method interface
8397 calls are resolved via a
8398 helper function and don't
8400 if (!cmethod_context || !cmethod_context->method_inst)
8401 pass_imt_from_rgctx = TRUE;
8405 * If a shared method calls another
8406 * shared method then the caller must
8407 * have a generic sharing context
8408 * because the magic trampoline
8409 * requires it. FIXME: We shouldn't
8410 * have to force the vtable/mrgctx
8411 * variable here. Instead there
8412 * should be a flag in the cfg to
8413 * request a generic sharing context.
8416 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8417 mono_get_vtable_var (cfg);
8422 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8424 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8426 CHECK_TYPELOAD (cmethod->klass);
8427 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8432 g_assert (!vtable_arg);
8434 if (!cfg->compile_aot) {
8436 * emit_get_rgctx_method () calls mono_class_vtable () so check
8437 * for type load errors before.
8439 mono_class_setup_vtable (cmethod->klass);
8440 CHECK_TYPELOAD (cmethod->klass);
8443 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8445 /* !marshalbyref is needed to properly handle generic methods + remoting */
8446 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8447 MONO_METHOD_IS_FINAL (cmethod)) &&
8448 !mono_class_is_marshalbyref (cmethod->klass)) {
8455 if (pass_imt_from_rgctx) {
8456 g_assert (!pass_vtable);
8459 imt_arg = emit_get_rgctx_method (cfg, context_used,
8460 cmethod, MONO_RGCTX_INFO_METHOD);
8464 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8466 /* Calling virtual generic methods */
8467 if (cmethod && virtual &&
8468 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8469 !(MONO_METHOD_IS_FINAL (cmethod) &&
8470 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8471 fsig->generic_param_count &&
8472 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8473 MonoInst *this_temp, *this_arg_temp, *store;
8474 MonoInst *iargs [4];
8475 gboolean use_imt = FALSE;
8477 g_assert (fsig->is_inflated);
8479 /* Prevent inlining of methods that contain indirect calls */
8480 INLINE_FAILURE ("virtual generic call");
8482 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8483 GSHAREDVT_FAILURE (*ip);
8485 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8486 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8491 g_assert (!imt_arg);
8493 g_assert (cmethod->is_inflated);
8494 imt_arg = emit_get_rgctx_method (cfg, context_used,
8495 cmethod, MONO_RGCTX_INFO_METHOD);
8496 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8498 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8499 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8500 MONO_ADD_INS (bblock, store);
8502 /* FIXME: This should be a managed pointer */
8503 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8505 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8506 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8507 cmethod, MONO_RGCTX_INFO_METHOD);
8508 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8509 addr = mono_emit_jit_icall (cfg,
8510 mono_helper_compile_generic_method, iargs);
8512 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8514 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8521 * Implement a workaround for the inherent races involved in locking:
8527 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8528 * try block, the Exit () won't be executed, see:
8529 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8530 * To work around this, we extend such try blocks to include the last x bytes
8531 * of the Monitor.Enter () call.
8533 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8534 MonoBasicBlock *tbb;
8536 GET_BBLOCK (cfg, tbb, ip + 5);
8538 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8539 * from Monitor.Enter like ArgumentNullException.
8541 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8542 /* Mark this bblock as needing to be extended */
8543 tbb->extend_try_block = TRUE;
8547 /* Conversion to a JIT intrinsic */
8548 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8550 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8551 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8558 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8559 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8560 mono_method_check_inlining (cfg, cmethod)) {
8562 gboolean always = FALSE;
8564 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8565 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8566 /* Prevent inlining of methods that call wrappers */
8567 INLINE_FAILURE ("wrapper call");
8568 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8572 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
8574 cfg->real_offset += 5;
8576 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8577 /* *sp is already set by inline_method */
8582 inline_costs += costs;
8588 /* Tail recursion elimination */
8589 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8590 gboolean has_vtargs = FALSE;
8593 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8594 INLINE_FAILURE ("tail call");
8596 /* keep it simple */
8597 for (i = fsig->param_count - 1; i >= 0; i--) {
8598 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8603 for (i = 0; i < n; ++i)
8604 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8605 MONO_INST_NEW (cfg, ins, OP_BR);
8606 MONO_ADD_INS (bblock, ins);
8607 tblock = start_bblock->out_bb [0];
8608 link_bblock (cfg, bblock, tblock);
8609 ins->inst_target_bb = tblock;
8610 start_new_bblock = 1;
8612 /* skip the CEE_RET, too */
8613 if (ip_in_bb (cfg, bblock, ip + 5))
8620 inline_costs += 10 * num_calls++;
8623 * Making generic calls out of gsharedvt methods.
8624 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8625 * patching gshared method addresses into a gsharedvt method.
8627 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class)) {
8628 MonoRgctxInfoType info_type;
8631 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8632 //GSHAREDVT_FAILURE (*ip);
8633 // disable for possible remoting calls
8634 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8635 GSHAREDVT_FAILURE (*ip);
8636 if (fsig->generic_param_count) {
8637 /* virtual generic call */
8638 g_assert (mono_use_imt);
8639 g_assert (!imt_arg);
8640 /* Same as the virtual generic case above */
8641 imt_arg = emit_get_rgctx_method (cfg, context_used,
8642 cmethod, MONO_RGCTX_INFO_METHOD);
8643 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8645 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
8646 /* This can happen when we call a fully instantiated iface method */
8647 imt_arg = emit_get_rgctx_method (cfg, context_used,
8648 cmethod, MONO_RGCTX_INFO_METHOD);
8653 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8654 /* test_0_multi_dim_arrays () in gshared.cs */
8655 GSHAREDVT_FAILURE (*ip);
8657 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8658 keep_this_alive = sp [0];
8660 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8661 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8663 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8664 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8666 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8668 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8670 * We pass the address to the gsharedvt trampoline in the rgctx reg
8672 MonoInst *callee = addr;
8674 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8676 GSHAREDVT_FAILURE (*ip);
8678 addr = emit_get_rgctx_sig (cfg, context_used,
8679 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8680 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8684 /* Generic sharing */
8687 * Use this if the callee is gsharedvt sharable too, since
8688 * at runtime we might find an instantiation so the call cannot
8689 * be patched (the 'no_patch' code path in mini-trampolines.c).
8691 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8692 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8693 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8694 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8695 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8696 INLINE_FAILURE ("gshared");
8698 g_assert (cfg->generic_sharing_context && cmethod);
8702 * We are compiling a call to a
8703 * generic method from shared code,
8704 * which means that we have to look up
8705 * the method in the rgctx and do an
8709 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8711 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8712 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8716 /* Indirect calls */
8718 if (call_opcode == CEE_CALL)
8719 g_assert (context_used);
8720 else if (call_opcode == CEE_CALLI)
8721 g_assert (!vtable_arg);
8723 /* FIXME: what the hell is this??? */
8724 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8725 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8727 /* Prevent inlining of methods with indirect calls */
8728 INLINE_FAILURE ("indirect call");
8730 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8735 * Instead of emitting an indirect call, emit a direct call
8736 * with the contents of the aotconst as the patch info.
8738 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8739 info_type = addr->inst_c1;
8740 info_data = addr->inst_p0;
8742 info_type = addr->inst_right->inst_c1;
8743 info_data = addr->inst_right->inst_left;
8746 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8747 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8752 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8760 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8761 MonoInst *val = sp [fsig->param_count];
8763 if (val->type == STACK_OBJ) {
8764 MonoInst *iargs [2];
8769 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8772 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8773 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8774 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8775 emit_write_barrier (cfg, addr, val);
8776 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8777 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8779 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8780 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8781 if (!cmethod->klass->element_class->valuetype && !readonly)
8782 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8783 CHECK_TYPELOAD (cmethod->klass);
8786 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8789 g_assert_not_reached ();
8796 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8800 /* Tail prefix / tail call optimization */
8802 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8803 /* FIXME: runtime generic context pointer for jumps? */
8804 /* FIXME: handle this for generic sharing eventually */
8805 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8806 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8807 supported_tail_call = TRUE;
8809 if (supported_tail_call) {
8812 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8813 INLINE_FAILURE ("tail call");
8815 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8817 if (ARCH_HAVE_OP_TAIL_CALL) {
8818 /* Handle tail calls similarly to normal calls */
8821 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8823 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8824 call->tail_call = TRUE;
8825 call->method = cmethod;
8826 call->signature = mono_method_signature (cmethod);
8829 * We implement tail calls by storing the actual arguments into the
8830 * argument variables, then emitting a CEE_JMP.
8832 for (i = 0; i < n; ++i) {
8833 /* Prevent argument from being register allocated */
8834 arg_array [i]->flags |= MONO_INST_VOLATILE;
8835 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8837 ins = (MonoInst*)call;
8838 ins->inst_p0 = cmethod;
8839 ins->inst_p1 = arg_array [0];
8840 MONO_ADD_INS (bblock, ins);
8841 link_bblock (cfg, bblock, end_bblock);
8842 start_new_bblock = 1;
8844 // FIXME: Eliminate unreachable epilogs
8847 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8848 * only reachable from this call.
8850 GET_BBLOCK (cfg, tblock, ip + 5);
8851 if (tblock == bblock || tblock->in_count == 0)
8860 * Synchronized wrappers.
8861 * It's hard to determine where to replace a method with its synchronized
8862 * wrapper without causing an infinite recursion. The current solution is
8863 * to add the synchronized wrapper in the trampolines, and to
8864 * change the called method to a dummy wrapper, and resolve that wrapper
8865 * to the real method in mono_jit_compile_method ().
8867 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8868 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8869 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8870 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8874 INLINE_FAILURE ("call");
8875 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8876 imt_arg, vtable_arg);
8879 link_bblock (cfg, bblock, end_bblock);
8880 start_new_bblock = 1;
8882 // FIXME: Eliminate unreachable epilogs
8885 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8886 * only reachable from this call.
8888 GET_BBLOCK (cfg, tblock, ip + 5);
8889 if (tblock == bblock || tblock->in_count == 0)
8896 /* End of call, INS should contain the result of the call, if any */
8898 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8901 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8906 if (keep_this_alive) {
8907 MonoInst *dummy_use;
8909 /* See mono_emit_method_call_full () */
8910 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8913 CHECK_CFG_EXCEPTION;
8917 g_assert (*ip == CEE_RET);
8921 constrained_call = NULL;
8923 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8927 if (cfg->method != method) {
8928 /* return from inlined method */
8930 * If in_count == 0, that means the ret is unreachable due to
8931 * being preceded by a throw. In that case, inline_method () will
8932 * handle setting the return value
8933 * (test case: test_0_inline_throw ()).
8935 if (return_var && cfg->cbb->in_count) {
8936 MonoType *ret_type = mono_method_signature (method)->ret;
8942 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8945 //g_assert (returnvar != -1);
8946 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8947 cfg->ret_var_set = TRUE;
8950 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8952 if (cfg->lmf_var && cfg->cbb->in_count)
8956 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8958 if (seq_points && !sym_seq_points) {
8960 * Place a seq point here too even though the IL stack is not
8961 * empty, so a step over on
8964 * will work correctly.
8966 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8967 MONO_ADD_INS (cfg->cbb, ins);
8970 g_assert (!return_var);
8974 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8977 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8980 if (!cfg->vret_addr) {
8983 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8985 EMIT_NEW_RETLOADA (cfg, ret_addr);
8987 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8988 ins->klass = mono_class_from_mono_type (ret_type);
8991 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8992 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8993 MonoInst *iargs [1];
8997 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8998 mono_arch_emit_setret (cfg, method, conv);
9000 mono_arch_emit_setret (cfg, method, *sp);
9003 mono_arch_emit_setret (cfg, method, *sp);
9008 if (sp != stack_start)
9010 MONO_INST_NEW (cfg, ins, OP_BR);
9012 ins->inst_target_bb = end_bblock;
9013 MONO_ADD_INS (bblock, ins);
9014 link_bblock (cfg, bblock, end_bblock);
9015 start_new_bblock = 1;
9019 MONO_INST_NEW (cfg, ins, OP_BR);
9021 target = ip + 1 + (signed char)(*ip);
9023 GET_BBLOCK (cfg, tblock, target);
9024 link_bblock (cfg, bblock, tblock);
9025 ins->inst_target_bb = tblock;
9026 if (sp != stack_start) {
9027 handle_stack_args (cfg, stack_start, sp - stack_start);
9029 CHECK_UNVERIFIABLE (cfg);
9031 MONO_ADD_INS (bblock, ins);
9032 start_new_bblock = 1;
9033 inline_costs += BRANCH_COST;
9047 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9049 target = ip + 1 + *(signed char*)ip;
9055 inline_costs += BRANCH_COST;
9059 MONO_INST_NEW (cfg, ins, OP_BR);
9062 target = ip + 4 + (gint32)read32(ip);
9064 GET_BBLOCK (cfg, tblock, target);
9065 link_bblock (cfg, bblock, tblock);
9066 ins->inst_target_bb = tblock;
9067 if (sp != stack_start) {
9068 handle_stack_args (cfg, stack_start, sp - stack_start);
9070 CHECK_UNVERIFIABLE (cfg);
9073 MONO_ADD_INS (bblock, ins);
9075 start_new_bblock = 1;
9076 inline_costs += BRANCH_COST;
9083 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9084 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9085 guint32 opsize = is_short ? 1 : 4;
9087 CHECK_OPSIZE (opsize);
9089 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9092 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9097 GET_BBLOCK (cfg, tblock, target);
9098 link_bblock (cfg, bblock, tblock);
9099 GET_BBLOCK (cfg, tblock, ip);
9100 link_bblock (cfg, bblock, tblock);
9102 if (sp != stack_start) {
9103 handle_stack_args (cfg, stack_start, sp - stack_start);
9104 CHECK_UNVERIFIABLE (cfg);
9107 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9108 cmp->sreg1 = sp [0]->dreg;
9109 type_from_op (cmp, sp [0], NULL);
9112 #if SIZEOF_REGISTER == 4
9113 if (cmp->opcode == OP_LCOMPARE_IMM) {
9114 /* Convert it to OP_LCOMPARE */
9115 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9116 ins->type = STACK_I8;
9117 ins->dreg = alloc_dreg (cfg, STACK_I8);
9119 MONO_ADD_INS (bblock, ins);
9120 cmp->opcode = OP_LCOMPARE;
9121 cmp->sreg2 = ins->dreg;
9124 MONO_ADD_INS (bblock, cmp);
9126 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9127 type_from_op (ins, sp [0], NULL);
9128 MONO_ADD_INS (bblock, ins);
9129 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9130 GET_BBLOCK (cfg, tblock, target);
9131 ins->inst_true_bb = tblock;
9132 GET_BBLOCK (cfg, tblock, ip);
9133 ins->inst_false_bb = tblock;
9134 start_new_bblock = 2;
9137 inline_costs += BRANCH_COST;
9152 MONO_INST_NEW (cfg, ins, *ip);
9154 target = ip + 4 + (gint32)read32(ip);
9160 inline_costs += BRANCH_COST;
9164 MonoBasicBlock **targets;
9165 MonoBasicBlock *default_bblock;
9166 MonoJumpInfoBBTable *table;
9167 int offset_reg = alloc_preg (cfg);
9168 int target_reg = alloc_preg (cfg);
9169 int table_reg = alloc_preg (cfg);
9170 int sum_reg = alloc_preg (cfg);
9171 gboolean use_op_switch;
9175 n = read32 (ip + 1);
9178 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9182 CHECK_OPSIZE (n * sizeof (guint32));
9183 target = ip + n * sizeof (guint32);
9185 GET_BBLOCK (cfg, default_bblock, target);
9186 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9188 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9189 for (i = 0; i < n; ++i) {
9190 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9191 targets [i] = tblock;
9192 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9196 if (sp != stack_start) {
9198 * Link the current bb with the targets as well, so handle_stack_args
9199 * will set their in_stack correctly.
9201 link_bblock (cfg, bblock, default_bblock);
9202 for (i = 0; i < n; ++i)
9203 link_bblock (cfg, bblock, targets [i]);
9205 handle_stack_args (cfg, stack_start, sp - stack_start);
9207 CHECK_UNVERIFIABLE (cfg);
9210 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9211 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9214 for (i = 0; i < n; ++i)
9215 link_bblock (cfg, bblock, targets [i]);
9217 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9218 table->table = targets;
9219 table->table_size = n;
9221 use_op_switch = FALSE;
9223 /* ARM implements SWITCH statements differently */
9224 /* FIXME: Make it use the generic implementation */
9225 if (!cfg->compile_aot)
9226 use_op_switch = TRUE;
9229 if (COMPILE_LLVM (cfg))
9230 use_op_switch = TRUE;
9232 cfg->cbb->has_jump_table = 1;
9234 if (use_op_switch) {
9235 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9236 ins->sreg1 = src1->dreg;
9237 ins->inst_p0 = table;
9238 ins->inst_many_bb = targets;
9239 ins->klass = GUINT_TO_POINTER (n);
9240 MONO_ADD_INS (cfg->cbb, ins);
9242 if (sizeof (gpointer) == 8)
9243 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9245 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9247 #if SIZEOF_REGISTER == 8
9248 /* The upper word might not be zero, and we add it to a 64 bit address later */
9249 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9252 if (cfg->compile_aot) {
9253 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9255 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9256 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9257 ins->inst_p0 = table;
9258 ins->dreg = table_reg;
9259 MONO_ADD_INS (cfg->cbb, ins);
9262 /* FIXME: Use load_memindex */
9263 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9264 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9265 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9267 start_new_bblock = 1;
9268 inline_costs += (BRANCH_COST * 2);
9288 dreg = alloc_freg (cfg);
9291 dreg = alloc_lreg (cfg);
9294 dreg = alloc_ireg_ref (cfg);
9297 dreg = alloc_preg (cfg);
9300 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9301 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9302 ins->flags |= ins_flag;
9303 MONO_ADD_INS (bblock, ins);
9305 if (ins_flag & MONO_INST_VOLATILE) {
9306 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9307 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9308 emit_memory_barrier (cfg, FullBarrier);
9324 if (ins_flag & MONO_INST_VOLATILE) {
9325 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9326 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
9327 emit_memory_barrier (cfg, FullBarrier);
9330 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9331 ins->flags |= ins_flag;
9334 MONO_ADD_INS (bblock, ins);
9336 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9337 emit_write_barrier (cfg, sp [0], sp [1]);
9346 MONO_INST_NEW (cfg, ins, (*ip));
9348 ins->sreg1 = sp [0]->dreg;
9349 ins->sreg2 = sp [1]->dreg;
9350 type_from_op (ins, sp [0], sp [1]);
9352 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9354 /* Use the immediate opcodes if possible */
9355 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9356 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9357 if (imm_opcode != -1) {
9358 ins->opcode = imm_opcode;
9359 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9362 NULLIFY_INS (sp [1]);
9366 MONO_ADD_INS ((cfg)->cbb, (ins));
9368 *sp++ = mono_decompose_opcode (cfg, ins);
9385 MONO_INST_NEW (cfg, ins, (*ip));
9387 ins->sreg1 = sp [0]->dreg;
9388 ins->sreg2 = sp [1]->dreg;
9389 type_from_op (ins, sp [0], sp [1]);
9391 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9392 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9394 /* FIXME: Pass opcode to is_inst_imm */
9396 /* Use the immediate opcodes if possible */
9397 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9400 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9401 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9402 /* Keep emulated opcodes which are optimized away later */
9403 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9404 imm_opcode = mono_op_to_op_imm (ins->opcode);
9407 if (imm_opcode != -1) {
9408 ins->opcode = imm_opcode;
9409 if (sp [1]->opcode == OP_I8CONST) {
9410 #if SIZEOF_REGISTER == 8
9411 ins->inst_imm = sp [1]->inst_l;
9413 ins->inst_ls_word = sp [1]->inst_ls_word;
9414 ins->inst_ms_word = sp [1]->inst_ms_word;
9418 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9421 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9422 if (sp [1]->next == NULL)
9423 NULLIFY_INS (sp [1]);
9426 MONO_ADD_INS ((cfg)->cbb, (ins));
9428 *sp++ = mono_decompose_opcode (cfg, ins);
9441 case CEE_CONV_OVF_I8:
9442 case CEE_CONV_OVF_U8:
9446 /* Special case this earlier so we have long constants in the IR */
9447 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9448 int data = sp [-1]->inst_c0;
9449 sp [-1]->opcode = OP_I8CONST;
9450 sp [-1]->type = STACK_I8;
9451 #if SIZEOF_REGISTER == 8
9452 if ((*ip) == CEE_CONV_U8)
9453 sp [-1]->inst_c0 = (guint32)data;
9455 sp [-1]->inst_c0 = data;
9457 sp [-1]->inst_ls_word = data;
9458 if ((*ip) == CEE_CONV_U8)
9459 sp [-1]->inst_ms_word = 0;
9461 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9463 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9470 case CEE_CONV_OVF_I4:
9471 case CEE_CONV_OVF_I1:
9472 case CEE_CONV_OVF_I2:
9473 case CEE_CONV_OVF_I:
9474 case CEE_CONV_OVF_U:
9477 if (sp [-1]->type == STACK_R8) {
9478 ADD_UNOP (CEE_CONV_OVF_I8);
9485 case CEE_CONV_OVF_U1:
9486 case CEE_CONV_OVF_U2:
9487 case CEE_CONV_OVF_U4:
9490 if (sp [-1]->type == STACK_R8) {
9491 ADD_UNOP (CEE_CONV_OVF_U8);
9498 case CEE_CONV_OVF_I1_UN:
9499 case CEE_CONV_OVF_I2_UN:
9500 case CEE_CONV_OVF_I4_UN:
9501 case CEE_CONV_OVF_I8_UN:
9502 case CEE_CONV_OVF_U1_UN:
9503 case CEE_CONV_OVF_U2_UN:
9504 case CEE_CONV_OVF_U4_UN:
9505 case CEE_CONV_OVF_U8_UN:
9506 case CEE_CONV_OVF_I_UN:
9507 case CEE_CONV_OVF_U_UN:
9514 CHECK_CFG_EXCEPTION;
9518 case CEE_ADD_OVF_UN:
9520 case CEE_MUL_OVF_UN:
9522 case CEE_SUB_OVF_UN:
9528 GSHAREDVT_FAILURE (*ip);
9531 token = read32 (ip + 1);
9532 klass = mini_get_class (method, token, generic_context);
9533 CHECK_TYPELOAD (klass);
9535 if (generic_class_is_reference_type (cfg, klass)) {
9536 MonoInst *store, *load;
9537 int dreg = alloc_ireg_ref (cfg);
9539 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9540 load->flags |= ins_flag;
9541 MONO_ADD_INS (cfg->cbb, load);
9543 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9544 store->flags |= ins_flag;
9545 MONO_ADD_INS (cfg->cbb, store);
9547 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9548 emit_write_barrier (cfg, sp [0], sp [1]);
9550 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9562 token = read32 (ip + 1);
9563 klass = mini_get_class (method, token, generic_context);
9564 CHECK_TYPELOAD (klass);
9566 /* Optimize the common ldobj+stloc combination */
9576 loc_index = ip [5] - CEE_STLOC_0;
9583 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9584 CHECK_LOCAL (loc_index);
9586 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9587 ins->dreg = cfg->locals [loc_index]->dreg;
9588 ins->flags |= ins_flag;
9591 if (ins_flag & MONO_INST_VOLATILE) {
9592 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9593 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9594 emit_memory_barrier (cfg, FullBarrier);
9600 /* Optimize the ldobj+stobj combination */
9601 /* The reference case ends up being a load+store anyway */
9602 /* Skip this if the operation is volatile. */
9603 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9608 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9615 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9616 ins->flags |= ins_flag;
9619 if (ins_flag & MONO_INST_VOLATILE) {
9620 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9621 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9622 emit_memory_barrier (cfg, FullBarrier);
9631 CHECK_STACK_OVF (1);
9633 n = read32 (ip + 1);
9635 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9636 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9637 ins->type = STACK_OBJ;
9640 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9641 MonoInst *iargs [1];
9642 char *str = mono_method_get_wrapper_data (method, n);
9644 if (cfg->compile_aot)
9645 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9647 EMIT_NEW_PCONST (cfg, iargs [0], str);
9648 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9650 if (cfg->opt & MONO_OPT_SHARED) {
9651 MonoInst *iargs [3];
9653 if (cfg->compile_aot) {
9654 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9656 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9657 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9658 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9659 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9660 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9662 if (bblock->out_of_line) {
9663 MonoInst *iargs [2];
9665 if (image == mono_defaults.corlib) {
9667 * Avoid relocations in AOT and save some space by using a
9668 * version of helper_ldstr specialized to mscorlib.
9670 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9671 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9673 /* Avoid creating the string object */
9674 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9675 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9676 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9680 if (cfg->compile_aot) {
9681 NEW_LDSTRCONST (cfg, ins, image, n);
9683 MONO_ADD_INS (bblock, ins);
9686 NEW_PCONST (cfg, ins, NULL);
9687 ins->type = STACK_OBJ;
9688 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9690 OUT_OF_MEMORY_FAILURE;
9693 MONO_ADD_INS (bblock, ins);
9702 MonoInst *iargs [2];
9703 MonoMethodSignature *fsig;
9706 MonoInst *vtable_arg = NULL;
9709 token = read32 (ip + 1);
9710 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9711 if (!cmethod || mono_loader_get_last_error ())
9713 fsig = mono_method_get_signature_checked (cmethod, image, token, NULL, &cfg->error);
9716 mono_save_token_info (cfg, image, token, cmethod);
9718 if (!mono_class_init (cmethod->klass))
9719 TYPE_LOAD_ERROR (cmethod->klass);
9721 context_used = mini_method_check_context_used (cfg, cmethod);
9723 if (mono_security_cas_enabled ()) {
9724 if (check_linkdemand (cfg, method, cmethod))
9725 INLINE_FAILURE ("linkdemand");
9726 CHECK_CFG_EXCEPTION;
9727 } else if (mono_security_core_clr_enabled ()) {
9728 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9731 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9732 emit_generic_class_init (cfg, cmethod->klass);
9733 CHECK_TYPELOAD (cmethod->klass);
9737 if (cfg->gsharedvt) {
9738 if (mini_is_gsharedvt_variable_signature (sig))
9739 GSHAREDVT_FAILURE (*ip);
9743 n = fsig->param_count;
9747 * Generate smaller code for the common newobj <exception> instruction in
9748 * argument checking code.
9750 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9751 is_exception_class (cmethod->klass) && n <= 2 &&
9752 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9753 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9754 MonoInst *iargs [3];
9758 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9761 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9765 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9770 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9773 g_assert_not_reached ();
9781 /* move the args to allow room for 'this' in the first position */
9787 /* check_call_signature () requires sp[0] to be set */
9788 this_ins.type = STACK_OBJ;
9790 if (check_call_signature (cfg, fsig, sp))
9795 if (mini_class_is_system_array (cmethod->klass)) {
9796 *sp = emit_get_rgctx_method (cfg, context_used,
9797 cmethod, MONO_RGCTX_INFO_METHOD);
9799 /* Avoid varargs in the common case */
9800 if (fsig->param_count == 1)
9801 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9802 else if (fsig->param_count == 2)
9803 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9804 else if (fsig->param_count == 3)
9805 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9806 else if (fsig->param_count == 4)
9807 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9809 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9810 } else if (cmethod->string_ctor) {
9811 g_assert (!context_used);
9812 g_assert (!vtable_arg);
9813 /* we simply pass a null pointer */
9814 EMIT_NEW_PCONST (cfg, *sp, NULL);
9815 /* now call the string ctor */
9816 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9818 if (cmethod->klass->valuetype) {
9819 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9820 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9821 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9826 * The code generated by mini_emit_virtual_call () expects
9827 * iargs [0] to be a boxed instance, but luckily the vcall
9828 * will be transformed into a normal call there.
9830 } else if (context_used) {
9831 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9834 MonoVTable *vtable = NULL;
9836 if (!cfg->compile_aot)
9837 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9838 CHECK_TYPELOAD (cmethod->klass);
9841 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9842 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9843 * As a workaround, we call class cctors before allocating objects.
9845 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9846 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9847 if (cfg->verbose_level > 2)
9848 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9849 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9852 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9855 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9858 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9860 /* Now call the actual ctor */
9861 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
9862 CHECK_CFG_EXCEPTION;
9865 if (alloc == NULL) {
9867 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9868 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9876 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
9877 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9884 token = read32 (ip + 1);
9885 klass = mini_get_class (method, token, generic_context);
9886 CHECK_TYPELOAD (klass);
9887 if (sp [0]->type != STACK_OBJ)
9890 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
9891 CHECK_CFG_EXCEPTION;
9900 token = read32 (ip + 1);
9901 klass = mini_get_class (method, token, generic_context);
9902 CHECK_TYPELOAD (klass);
9903 if (sp [0]->type != STACK_OBJ)
9906 context_used = mini_class_check_context_used (cfg, klass);
9908 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9909 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9916 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9919 if (cfg->compile_aot)
9920 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9922 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9924 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9927 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9928 MonoMethod *mono_isinst;
9929 MonoInst *iargs [1];
9932 mono_isinst = mono_marshal_get_isinst (klass);
9935 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9936 iargs, ip, cfg->real_offset, TRUE, &bblock);
9937 CHECK_CFG_EXCEPTION;
9938 g_assert (costs > 0);
9941 cfg->real_offset += 5;
9945 inline_costs += costs;
9948 ins = handle_isinst (cfg, klass, *sp, context_used);
9949 CHECK_CFG_EXCEPTION;
9956 case CEE_UNBOX_ANY: {
9957 MonoInst *res, *addr;
9962 token = read32 (ip + 1);
9963 klass = mini_get_class (method, token, generic_context);
9964 CHECK_TYPELOAD (klass);
9966 mono_save_token_info (cfg, image, token, klass);
9968 context_used = mini_class_check_context_used (cfg, klass);
9970 if (mini_is_gsharedvt_klass (cfg, klass)) {
9971 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9973 } else if (generic_class_is_reference_type (cfg, klass)) {
9974 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
9975 CHECK_CFG_EXCEPTION;
9976 } else if (mono_class_is_nullable (klass)) {
9977 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
9979 addr = handle_unbox (cfg, klass, sp, context_used);
9981 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9997 token = read32 (ip + 1);
9998 klass = mini_get_class (method, token, generic_context);
9999 CHECK_TYPELOAD (klass);
10001 mono_save_token_info (cfg, image, token, klass);
10003 context_used = mini_class_check_context_used (cfg, klass);
10005 if (generic_class_is_reference_type (cfg, klass)) {
10011 if (klass == mono_defaults.void_class)
10013 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10015 /* frequent check in generic code: box (struct), brtrue */
10017 // FIXME: LLVM can't handle the inconsistent bb linking
10018 if (!mono_class_is_nullable (klass) &&
10019 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10020 (ip [5] == CEE_BRTRUE ||
10021 ip [5] == CEE_BRTRUE_S ||
10022 ip [5] == CEE_BRFALSE ||
10023 ip [5] == CEE_BRFALSE_S)) {
10024 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10026 MonoBasicBlock *true_bb, *false_bb;
10030 if (cfg->verbose_level > 3) {
10031 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10032 printf ("<box+brtrue opt>\n");
10037 case CEE_BRFALSE_S:
10040 target = ip + 1 + (signed char)(*ip);
10047 target = ip + 4 + (gint)(read32 (ip));
10051 g_assert_not_reached ();
10055 * We need to link both bblocks, since it is needed for handling stack
10056 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10057 * Branching to only one of them would lead to inconsistencies, so
10058 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10060 GET_BBLOCK (cfg, true_bb, target);
10061 GET_BBLOCK (cfg, false_bb, ip);
10063 mono_link_bblock (cfg, cfg->cbb, true_bb);
10064 mono_link_bblock (cfg, cfg->cbb, false_bb);
10066 if (sp != stack_start) {
10067 handle_stack_args (cfg, stack_start, sp - stack_start);
10069 CHECK_UNVERIFIABLE (cfg);
10072 if (COMPILE_LLVM (cfg)) {
10073 dreg = alloc_ireg (cfg);
10074 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10075 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10077 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10079 /* The JIT can't eliminate the iconst+compare */
10080 MONO_INST_NEW (cfg, ins, OP_BR);
10081 ins->inst_target_bb = is_true ? true_bb : false_bb;
10082 MONO_ADD_INS (cfg->cbb, ins);
10085 start_new_bblock = 1;
10089 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10091 CHECK_CFG_EXCEPTION;
10100 token = read32 (ip + 1);
10101 klass = mini_get_class (method, token, generic_context);
10102 CHECK_TYPELOAD (klass);
10104 mono_save_token_info (cfg, image, token, klass);
10106 context_used = mini_class_check_context_used (cfg, klass);
10108 if (mono_class_is_nullable (klass)) {
10111 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10112 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10116 ins = handle_unbox (cfg, klass, sp, context_used);
10129 MonoClassField *field;
10130 #ifndef DISABLE_REMOTING
10134 gboolean is_instance;
10136 gpointer addr = NULL;
10137 gboolean is_special_static;
10139 MonoInst *store_val = NULL;
10140 MonoInst *thread_ins;
10143 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10145 if (op == CEE_STFLD) {
10148 store_val = sp [1];
10153 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10155 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10158 if (op == CEE_STSFLD) {
10161 store_val = sp [0];
10166 token = read32 (ip + 1);
10167 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10168 field = mono_method_get_wrapper_data (method, token);
10169 klass = field->parent;
10172 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10175 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10176 FIELD_ACCESS_FAILURE (method, field);
10177 mono_class_init (klass);
10179 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10182 /* if the class is Critical then transparent code cannot access it's fields */
10183 if (!is_instance && mono_security_core_clr_enabled ())
10184 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10186 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10187 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10188 if (mono_security_core_clr_enabled ())
10189 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10193 * LDFLD etc. is usable on static fields as well, so convert those cases to
10196 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10208 g_assert_not_reached ();
10210 is_instance = FALSE;
10213 context_used = mini_class_check_context_used (cfg, klass);
10215 /* INSTANCE CASE */
10217 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10218 if (op == CEE_STFLD) {
10219 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10221 #ifndef DISABLE_REMOTING
10222 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10223 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10224 MonoInst *iargs [5];
10226 GSHAREDVT_FAILURE (op);
10228 iargs [0] = sp [0];
10229 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10230 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10231 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10233 iargs [4] = sp [1];
10235 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10236 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10237 iargs, ip, cfg->real_offset, TRUE, &bblock);
10238 CHECK_CFG_EXCEPTION;
10239 g_assert (costs > 0);
10241 cfg->real_offset += 5;
10243 inline_costs += costs;
10245 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10252 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10254 if (mini_is_gsharedvt_klass (cfg, klass)) {
10255 MonoInst *offset_ins;
10257 context_used = mini_class_check_context_used (cfg, klass);
10259 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10260 dreg = alloc_ireg_mp (cfg);
10261 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10262 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10263 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10265 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10267 if (sp [0]->opcode != OP_LDADDR)
10268 store->flags |= MONO_INST_FAULT;
10270 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10271 /* insert call to write barrier */
10275 dreg = alloc_ireg_mp (cfg);
10276 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10277 emit_write_barrier (cfg, ptr, sp [1]);
10280 store->flags |= ins_flag;
10287 #ifndef DISABLE_REMOTING
10288 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10289 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10290 MonoInst *iargs [4];
10292 GSHAREDVT_FAILURE (op);
10294 iargs [0] = sp [0];
10295 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10296 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10297 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10298 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10299 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10300 iargs, ip, cfg->real_offset, TRUE, &bblock);
10301 CHECK_CFG_EXCEPTION;
10302 g_assert (costs > 0);
10304 cfg->real_offset += 5;
10308 inline_costs += costs;
10310 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10316 if (sp [0]->type == STACK_VTYPE) {
10319 /* Have to compute the address of the variable */
10321 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10323 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10325 g_assert (var->klass == klass);
10327 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10331 if (op == CEE_LDFLDA) {
10332 if (is_magic_tls_access (field)) {
10333 GSHAREDVT_FAILURE (*ip);
10335 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10337 if (sp [0]->type == STACK_OBJ) {
10338 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10339 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10342 dreg = alloc_ireg_mp (cfg);
10344 if (mini_is_gsharedvt_klass (cfg, klass)) {
10345 MonoInst *offset_ins;
10347 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10348 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10350 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10352 ins->klass = mono_class_from_mono_type (field->type);
10353 ins->type = STACK_MP;
10359 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10361 if (mini_is_gsharedvt_klass (cfg, klass)) {
10362 MonoInst *offset_ins;
10364 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10365 dreg = alloc_ireg_mp (cfg);
10366 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10367 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10369 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10371 load->flags |= ins_flag;
10372 if (sp [0]->opcode != OP_LDADDR)
10373 load->flags |= MONO_INST_FAULT;
10387 * We can only support shared generic static
10388 * field access on architectures where the
10389 * trampoline code has been extended to handle
10390 * the generic class init.
10392 #ifndef MONO_ARCH_VTABLE_REG
10393 GENERIC_SHARING_FAILURE (op);
10396 context_used = mini_class_check_context_used (cfg, klass);
10398 ftype = mono_field_get_type (field);
10400 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10403 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10404 * to be called here.
10406 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10407 mono_class_vtable (cfg->domain, klass);
10408 CHECK_TYPELOAD (klass);
10410 mono_domain_lock (cfg->domain);
10411 if (cfg->domain->special_static_fields)
10412 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10413 mono_domain_unlock (cfg->domain);
10415 is_special_static = mono_class_field_is_special_static (field);
10417 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10418 thread_ins = mono_get_thread_intrinsic (cfg);
10422 /* Generate IR to compute the field address */
10423 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10425 * Fast access to TLS data
10426 * Inline version of get_thread_static_data () in
10430 int idx, static_data_reg, array_reg, dreg;
10432 GSHAREDVT_FAILURE (op);
10434 // offset &= 0x7fffffff;
10435 // idx = (offset >> 24) - 1;
10436 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10437 MONO_ADD_INS (cfg->cbb, thread_ins);
10438 static_data_reg = alloc_ireg (cfg);
10439 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10441 if (cfg->compile_aot) {
10442 int offset_reg, offset2_reg, idx_reg;
10444 /* For TLS variables, this will return the TLS offset */
10445 EMIT_NEW_SFLDACONST (cfg, ins, field);
10446 offset_reg = ins->dreg;
10447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10448 idx_reg = alloc_ireg (cfg);
10449 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10452 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10453 array_reg = alloc_ireg (cfg);
10454 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10455 offset2_reg = alloc_ireg (cfg);
10456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10457 dreg = alloc_ireg (cfg);
10458 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10460 offset = (gsize)addr & 0x7fffffff;
10461 idx = (offset >> 24) - 1;
10463 array_reg = alloc_ireg (cfg);
10464 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10465 dreg = alloc_ireg (cfg);
10466 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10468 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10469 (cfg->compile_aot && is_special_static) ||
10470 (context_used && is_special_static)) {
10471 MonoInst *iargs [2];
10473 g_assert (field->parent);
10474 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10475 if (context_used) {
10476 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10477 field, MONO_RGCTX_INFO_CLASS_FIELD);
10479 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10481 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10482 } else if (context_used) {
10483 MonoInst *static_data;
10486 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10487 method->klass->name_space, method->klass->name, method->name,
10488 depth, field->offset);
10491 if (mono_class_needs_cctor_run (klass, method))
10492 emit_generic_class_init (cfg, klass);
10495 * The pointer we're computing here is
10497 * super_info.static_data + field->offset
10499 static_data = emit_get_rgctx_klass (cfg, context_used,
10500 klass, MONO_RGCTX_INFO_STATIC_DATA);
10502 if (mini_is_gsharedvt_klass (cfg, klass)) {
10503 MonoInst *offset_ins;
10505 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10506 dreg = alloc_ireg_mp (cfg);
10507 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10508 } else if (field->offset == 0) {
10511 int addr_reg = mono_alloc_preg (cfg);
10512 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10514 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10515 MonoInst *iargs [2];
10517 g_assert (field->parent);
10518 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10519 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10520 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10522 MonoVTable *vtable = NULL;
10524 if (!cfg->compile_aot)
10525 vtable = mono_class_vtable (cfg->domain, klass);
10526 CHECK_TYPELOAD (klass);
10529 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10530 if (!(g_slist_find (class_inits, klass))) {
10531 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10532 if (cfg->verbose_level > 2)
10533 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10534 class_inits = g_slist_prepend (class_inits, klass);
10537 if (cfg->run_cctors) {
10539 /* This makes so that inline cannot trigger */
10540 /* .cctors: too many apps depend on them */
10541 /* running with a specific order... */
10543 if (! vtable->initialized)
10544 INLINE_FAILURE ("class init");
10545 ex = mono_runtime_class_init_full (vtable, FALSE);
10547 set_exception_object (cfg, ex);
10548 goto exception_exit;
10552 if (cfg->compile_aot)
10553 EMIT_NEW_SFLDACONST (cfg, ins, field);
10556 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10558 EMIT_NEW_PCONST (cfg, ins, addr);
10561 MonoInst *iargs [1];
10562 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10563 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10567 /* Generate IR to do the actual load/store operation */
10569 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10570 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10571 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10572 emit_memory_barrier (cfg, FullBarrier);
10575 if (op == CEE_LDSFLDA) {
10576 ins->klass = mono_class_from_mono_type (ftype);
10577 ins->type = STACK_PTR;
10579 } else if (op == CEE_STSFLD) {
10582 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10583 store->flags |= ins_flag;
10585 gboolean is_const = FALSE;
10586 MonoVTable *vtable = NULL;
10587 gpointer addr = NULL;
10589 if (!context_used) {
10590 vtable = mono_class_vtable (cfg->domain, klass);
10591 CHECK_TYPELOAD (klass);
10593 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10594 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10595 int ro_type = ftype->type;
10597 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10598 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10599 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10602 GSHAREDVT_FAILURE (op);
10604 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10607 case MONO_TYPE_BOOLEAN:
10609 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10613 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10616 case MONO_TYPE_CHAR:
10618 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10622 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10627 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10631 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10636 case MONO_TYPE_PTR:
10637 case MONO_TYPE_FNPTR:
10638 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10639 type_to_eval_stack_type ((cfg), field->type, *sp);
10642 case MONO_TYPE_STRING:
10643 case MONO_TYPE_OBJECT:
10644 case MONO_TYPE_CLASS:
10645 case MONO_TYPE_SZARRAY:
10646 case MONO_TYPE_ARRAY:
10647 if (!mono_gc_is_moving ()) {
10648 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10649 type_to_eval_stack_type ((cfg), field->type, *sp);
10657 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10662 case MONO_TYPE_VALUETYPE:
10672 CHECK_STACK_OVF (1);
10674 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10675 load->flags |= ins_flag;
10681 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10682 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10683 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10684 emit_memory_barrier (cfg, FullBarrier);
10695 token = read32 (ip + 1);
10696 klass = mini_get_class (method, token, generic_context);
10697 CHECK_TYPELOAD (klass);
10698 if (ins_flag & MONO_INST_VOLATILE) {
10699 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10700 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10701 emit_memory_barrier (cfg, FullBarrier);
10703 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10704 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10705 ins->flags |= ins_flag;
10706 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10707 generic_class_is_reference_type (cfg, klass)) {
10708 /* insert call to write barrier */
10709 emit_write_barrier (cfg, sp [0], sp [1]);
10721 const char *data_ptr;
10723 guint32 field_token;
10729 token = read32 (ip + 1);
10731 klass = mini_get_class (method, token, generic_context);
10732 CHECK_TYPELOAD (klass);
10734 context_used = mini_class_check_context_used (cfg, klass);
10736 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10737 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10738 ins->sreg1 = sp [0]->dreg;
10739 ins->type = STACK_I4;
10740 ins->dreg = alloc_ireg (cfg);
10741 MONO_ADD_INS (cfg->cbb, ins);
10742 *sp = mono_decompose_opcode (cfg, ins);
10745 if (context_used) {
10746 MonoInst *args [3];
10747 MonoClass *array_class = mono_array_class_get (klass, 1);
10748 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10750 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10753 args [0] = emit_get_rgctx_klass (cfg, context_used,
10754 array_class, MONO_RGCTX_INFO_VTABLE);
10759 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10761 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10763 if (cfg->opt & MONO_OPT_SHARED) {
10764 /* Decompose now to avoid problems with references to the domainvar */
10765 MonoInst *iargs [3];
10767 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10768 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10769 iargs [2] = sp [0];
10771 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10773 /* Decompose later since it is needed by abcrem */
10774 MonoClass *array_type = mono_array_class_get (klass, 1);
10775 mono_class_vtable (cfg->domain, array_type);
10776 CHECK_TYPELOAD (array_type);
10778 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10779 ins->dreg = alloc_ireg_ref (cfg);
10780 ins->sreg1 = sp [0]->dreg;
10781 ins->inst_newa_class = klass;
10782 ins->type = STACK_OBJ;
10783 ins->klass = array_type;
10784 MONO_ADD_INS (cfg->cbb, ins);
10785 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10786 cfg->cbb->has_array_access = TRUE;
10788 /* Needed so mono_emit_load_get_addr () gets called */
10789 mono_get_got_var (cfg);
10799 * we inline/optimize the initialization sequence if possible.
10800 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10801 * for small sizes open code the memcpy
10802 * ensure the rva field is big enough
10804 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10805 MonoMethod *memcpy_method = get_memcpy_method ();
10806 MonoInst *iargs [3];
10807 int add_reg = alloc_ireg_mp (cfg);
10809 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10810 if (cfg->compile_aot) {
10811 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10813 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10815 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10816 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10825 if (sp [0]->type != STACK_OBJ)
10828 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10829 ins->dreg = alloc_preg (cfg);
10830 ins->sreg1 = sp [0]->dreg;
10831 ins->type = STACK_I4;
10832 /* This flag will be inherited by the decomposition */
10833 ins->flags |= MONO_INST_FAULT;
10834 MONO_ADD_INS (cfg->cbb, ins);
10835 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10836 cfg->cbb->has_array_access = TRUE;
10844 if (sp [0]->type != STACK_OBJ)
10847 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10849 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10850 CHECK_TYPELOAD (klass);
10851 /* we need to make sure that this array is exactly the type it needs
10852 * to be for correctness. the wrappers are lax with their usage
10853 * so we need to ignore them here
10855 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10856 MonoClass *array_class = mono_array_class_get (klass, 1);
10857 mini_emit_check_array_type (cfg, sp [0], array_class);
10858 CHECK_TYPELOAD (array_class);
10862 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10867 case CEE_LDELEM_I1:
10868 case CEE_LDELEM_U1:
10869 case CEE_LDELEM_I2:
10870 case CEE_LDELEM_U2:
10871 case CEE_LDELEM_I4:
10872 case CEE_LDELEM_U4:
10873 case CEE_LDELEM_I8:
10875 case CEE_LDELEM_R4:
10876 case CEE_LDELEM_R8:
10877 case CEE_LDELEM_REF: {
10883 if (*ip == CEE_LDELEM) {
10885 token = read32 (ip + 1);
10886 klass = mini_get_class (method, token, generic_context);
10887 CHECK_TYPELOAD (klass);
10888 mono_class_init (klass);
10891 klass = array_access_to_klass (*ip);
10893 if (sp [0]->type != STACK_OBJ)
10896 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10898 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10899 // FIXME-VT: OP_ICONST optimization
10900 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10901 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10902 ins->opcode = OP_LOADV_MEMBASE;
10903 } else if (sp [1]->opcode == OP_ICONST) {
10904 int array_reg = sp [0]->dreg;
10905 int index_reg = sp [1]->dreg;
10906 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
10908 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10909 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10911 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10912 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10915 if (*ip == CEE_LDELEM)
10922 case CEE_STELEM_I1:
10923 case CEE_STELEM_I2:
10924 case CEE_STELEM_I4:
10925 case CEE_STELEM_I8:
10926 case CEE_STELEM_R4:
10927 case CEE_STELEM_R8:
10928 case CEE_STELEM_REF:
10933 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10935 if (*ip == CEE_STELEM) {
10937 token = read32 (ip + 1);
10938 klass = mini_get_class (method, token, generic_context);
10939 CHECK_TYPELOAD (klass);
10940 mono_class_init (klass);
10943 klass = array_access_to_klass (*ip);
10945 if (sp [0]->type != STACK_OBJ)
10948 emit_array_store (cfg, klass, sp, TRUE);
10950 if (*ip == CEE_STELEM)
10957 case CEE_CKFINITE: {
10961 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10962 ins->sreg1 = sp [0]->dreg;
10963 ins->dreg = alloc_freg (cfg);
10964 ins->type = STACK_R8;
10965 MONO_ADD_INS (bblock, ins);
10967 *sp++ = mono_decompose_opcode (cfg, ins);
10972 case CEE_REFANYVAL: {
10973 MonoInst *src_var, *src;
10975 int klass_reg = alloc_preg (cfg);
10976 int dreg = alloc_preg (cfg);
10978 GSHAREDVT_FAILURE (*ip);
10981 MONO_INST_NEW (cfg, ins, *ip);
10984 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10985 CHECK_TYPELOAD (klass);
10987 context_used = mini_class_check_context_used (cfg, klass);
10990 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10992 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10993 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10994 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
10996 if (context_used) {
10997 MonoInst *klass_ins;
10999 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11000 klass, MONO_RGCTX_INFO_KLASS);
11003 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11004 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11006 mini_emit_class_check (cfg, klass_reg, klass);
11008 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11009 ins->type = STACK_MP;
11014 case CEE_MKREFANY: {
11015 MonoInst *loc, *addr;
11017 GSHAREDVT_FAILURE (*ip);
11020 MONO_INST_NEW (cfg, ins, *ip);
11023 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11024 CHECK_TYPELOAD (klass);
11026 context_used = mini_class_check_context_used (cfg, klass);
11028 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11029 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11031 if (context_used) {
11032 MonoInst *const_ins;
11033 int type_reg = alloc_preg (cfg);
11035 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11036 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11037 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11038 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11039 } else if (cfg->compile_aot) {
11040 int const_reg = alloc_preg (cfg);
11041 int type_reg = alloc_preg (cfg);
11043 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11045 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11046 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11048 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11049 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11051 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11053 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11054 ins->type = STACK_VTYPE;
11055 ins->klass = mono_defaults.typed_reference_class;
11060 case CEE_LDTOKEN: {
11062 MonoClass *handle_class;
11064 CHECK_STACK_OVF (1);
11067 n = read32 (ip + 1);
11069 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11070 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11071 handle = mono_method_get_wrapper_data (method, n);
11072 handle_class = mono_method_get_wrapper_data (method, n + 1);
11073 if (handle_class == mono_defaults.typehandle_class)
11074 handle = &((MonoClass*)handle)->byval_arg;
11077 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11082 mono_class_init (handle_class);
11083 if (cfg->generic_sharing_context) {
11084 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11085 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11086 /* This case handles ldtoken
11087 of an open type, like for
11090 } else if (handle_class == mono_defaults.typehandle_class) {
11091 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11092 } else if (handle_class == mono_defaults.fieldhandle_class)
11093 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11094 else if (handle_class == mono_defaults.methodhandle_class)
11095 context_used = mini_method_check_context_used (cfg, handle);
11097 g_assert_not_reached ();
11100 if ((cfg->opt & MONO_OPT_SHARED) &&
11101 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11102 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11103 MonoInst *addr, *vtvar, *iargs [3];
11104 int method_context_used;
11106 method_context_used = mini_method_check_context_used (cfg, method);
11108 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11110 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11111 EMIT_NEW_ICONST (cfg, iargs [1], n);
11112 if (method_context_used) {
11113 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11114 method, MONO_RGCTX_INFO_METHOD);
11115 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11117 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11118 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11120 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11122 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11124 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11126 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11127 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11128 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11129 (cmethod->klass == mono_defaults.systemtype_class) &&
11130 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11131 MonoClass *tclass = mono_class_from_mono_type (handle);
11133 mono_class_init (tclass);
11134 if (context_used) {
11135 ins = emit_get_rgctx_klass (cfg, context_used,
11136 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11137 } else if (cfg->compile_aot) {
11138 if (method->wrapper_type) {
11139 mono_error_init (&error); //got to do it since there are multiple conditionals below
11140 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11141 /* Special case for static synchronized wrappers */
11142 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11144 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11145 /* FIXME: n is not a normal token */
11147 EMIT_NEW_PCONST (cfg, ins, NULL);
11150 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11153 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11155 ins->type = STACK_OBJ;
11156 ins->klass = cmethod->klass;
11159 MonoInst *addr, *vtvar;
11161 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11163 if (context_used) {
11164 if (handle_class == mono_defaults.typehandle_class) {
11165 ins = emit_get_rgctx_klass (cfg, context_used,
11166 mono_class_from_mono_type (handle),
11167 MONO_RGCTX_INFO_TYPE);
11168 } else if (handle_class == mono_defaults.methodhandle_class) {
11169 ins = emit_get_rgctx_method (cfg, context_used,
11170 handle, MONO_RGCTX_INFO_METHOD);
11171 } else if (handle_class == mono_defaults.fieldhandle_class) {
11172 ins = emit_get_rgctx_field (cfg, context_used,
11173 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11175 g_assert_not_reached ();
11177 } else if (cfg->compile_aot) {
11178 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11180 EMIT_NEW_PCONST (cfg, ins, handle);
11182 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11183 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11184 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11194 MONO_INST_NEW (cfg, ins, OP_THROW);
11196 ins->sreg1 = sp [0]->dreg;
11198 bblock->out_of_line = TRUE;
11199 MONO_ADD_INS (bblock, ins);
11200 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11201 MONO_ADD_INS (bblock, ins);
11204 link_bblock (cfg, bblock, end_bblock);
11205 start_new_bblock = 1;
11207 case CEE_ENDFINALLY:
11208 /* mono_save_seq_point_info () depends on this */
11209 if (sp != stack_start)
11210 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11211 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11212 MONO_ADD_INS (bblock, ins);
11214 start_new_bblock = 1;
11217 * Control will leave the method so empty the stack, otherwise
11218 * the next basic block will start with a nonempty stack.
11220 while (sp != stack_start) {
11225 case CEE_LEAVE_S: {
11228 if (*ip == CEE_LEAVE) {
11230 target = ip + 5 + (gint32)read32(ip + 1);
11233 target = ip + 2 + (signed char)(ip [1]);
11236 /* empty the stack */
11237 while (sp != stack_start) {
11242 * If this leave statement is in a catch block, check for a
11243 * pending exception, and rethrow it if necessary.
11244 * We avoid doing this in runtime invoke wrappers, since those are called
11245 * by native code which excepts the wrapper to catch all exceptions.
11247 for (i = 0; i < header->num_clauses; ++i) {
11248 MonoExceptionClause *clause = &header->clauses [i];
11251 * Use <= in the final comparison to handle clauses with multiple
11252 * leave statements, like in bug #78024.
11253 * The ordering of the exception clauses guarantees that we find the
11254 * innermost clause.
11256 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11258 MonoBasicBlock *dont_throw;
11263 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11266 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11268 NEW_BBLOCK (cfg, dont_throw);
11271 * Currently, we always rethrow the abort exception, despite the
11272 * fact that this is not correct. See thread6.cs for an example.
11273 * But propagating the abort exception is more important than
11274 * getting the sematics right.
11276 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11277 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11278 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11280 MONO_START_BB (cfg, dont_throw);
11285 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11287 MonoExceptionClause *clause;
11289 for (tmp = handlers; tmp; tmp = tmp->next) {
11290 clause = tmp->data;
11291 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11293 link_bblock (cfg, bblock, tblock);
11294 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11295 ins->inst_target_bb = tblock;
11296 ins->inst_eh_block = clause;
11297 MONO_ADD_INS (bblock, ins);
11298 bblock->has_call_handler = 1;
11299 if (COMPILE_LLVM (cfg)) {
11300 MonoBasicBlock *target_bb;
11303 * Link the finally bblock with the target, since it will
11304 * conceptually branch there.
11305 * FIXME: Have to link the bblock containing the endfinally.
11307 GET_BBLOCK (cfg, target_bb, target);
11308 link_bblock (cfg, tblock, target_bb);
11311 g_list_free (handlers);
11314 MONO_INST_NEW (cfg, ins, OP_BR);
11315 MONO_ADD_INS (bblock, ins);
11316 GET_BBLOCK (cfg, tblock, target);
11317 link_bblock (cfg, bblock, tblock);
11318 ins->inst_target_bb = tblock;
11319 start_new_bblock = 1;
11321 if (*ip == CEE_LEAVE)
11330 * Mono specific opcodes
11332 case MONO_CUSTOM_PREFIX: {
11334 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11338 case CEE_MONO_ICALL: {
11340 MonoJitICallInfo *info;
11342 token = read32 (ip + 2);
11343 func = mono_method_get_wrapper_data (method, token);
11344 info = mono_find_jit_icall_by_addr (func);
11346 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11349 CHECK_STACK (info->sig->param_count);
11350 sp -= info->sig->param_count;
11352 ins = mono_emit_jit_icall (cfg, info->func, sp);
11353 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11357 inline_costs += 10 * num_calls++;
11361 case CEE_MONO_LDPTR: {
11364 CHECK_STACK_OVF (1);
11366 token = read32 (ip + 2);
11368 ptr = mono_method_get_wrapper_data (method, token);
11369 /* FIXME: Generalize this */
11370 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11371 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11376 EMIT_NEW_PCONST (cfg, ins, ptr);
11379 inline_costs += 10 * num_calls++;
11380 /* Can't embed random pointers into AOT code */
11384 case CEE_MONO_JIT_ICALL_ADDR: {
11385 MonoJitICallInfo *callinfo;
11388 CHECK_STACK_OVF (1);
11390 token = read32 (ip + 2);
11392 ptr = mono_method_get_wrapper_data (method, token);
11393 callinfo = mono_find_jit_icall_by_addr (ptr);
11394 g_assert (callinfo);
11395 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11398 inline_costs += 10 * num_calls++;
11401 case CEE_MONO_ICALL_ADDR: {
11402 MonoMethod *cmethod;
11405 CHECK_STACK_OVF (1);
11407 token = read32 (ip + 2);
11409 cmethod = mono_method_get_wrapper_data (method, token);
11411 if (cfg->compile_aot) {
11412 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11414 ptr = mono_lookup_internal_call (cmethod);
11416 EMIT_NEW_PCONST (cfg, ins, ptr);
11422 case CEE_MONO_VTADDR: {
11423 MonoInst *src_var, *src;
11429 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11430 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11435 case CEE_MONO_NEWOBJ: {
11436 MonoInst *iargs [2];
11438 CHECK_STACK_OVF (1);
11440 token = read32 (ip + 2);
11441 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11442 mono_class_init (klass);
11443 NEW_DOMAINCONST (cfg, iargs [0]);
11444 MONO_ADD_INS (cfg->cbb, iargs [0]);
11445 NEW_CLASSCONST (cfg, iargs [1], klass);
11446 MONO_ADD_INS (cfg->cbb, iargs [1]);
11447 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11449 inline_costs += 10 * num_calls++;
11452 case CEE_MONO_OBJADDR:
11455 MONO_INST_NEW (cfg, ins, OP_MOVE);
11456 ins->dreg = alloc_ireg_mp (cfg);
11457 ins->sreg1 = sp [0]->dreg;
11458 ins->type = STACK_MP;
11459 MONO_ADD_INS (cfg->cbb, ins);
11463 case CEE_MONO_LDNATIVEOBJ:
11465 * Similar to LDOBJ, but instead load the unmanaged
11466 * representation of the vtype to the stack.
11471 token = read32 (ip + 2);
11472 klass = mono_method_get_wrapper_data (method, token);
11473 g_assert (klass->valuetype);
11474 mono_class_init (klass);
11477 MonoInst *src, *dest, *temp;
11480 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11481 temp->backend.is_pinvoke = 1;
11482 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11483 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11485 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11486 dest->type = STACK_VTYPE;
11487 dest->klass = klass;
11493 case CEE_MONO_RETOBJ: {
11495 * Same as RET, but return the native representation of a vtype
11498 g_assert (cfg->ret);
11499 g_assert (mono_method_signature (method)->pinvoke);
11504 token = read32 (ip + 2);
11505 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11507 if (!cfg->vret_addr) {
11508 g_assert (cfg->ret_var_is_local);
11510 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11512 EMIT_NEW_RETLOADA (cfg, ins);
11514 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11516 if (sp != stack_start)
11519 MONO_INST_NEW (cfg, ins, OP_BR);
11520 ins->inst_target_bb = end_bblock;
11521 MONO_ADD_INS (bblock, ins);
11522 link_bblock (cfg, bblock, end_bblock);
11523 start_new_bblock = 1;
11527 case CEE_MONO_CISINST:
11528 case CEE_MONO_CCASTCLASS: {
11533 token = read32 (ip + 2);
11534 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11535 if (ip [1] == CEE_MONO_CISINST)
11536 ins = handle_cisinst (cfg, klass, sp [0]);
11538 ins = handle_ccastclass (cfg, klass, sp [0]);
11544 case CEE_MONO_SAVE_LMF:
11545 case CEE_MONO_RESTORE_LMF:
11546 #ifdef MONO_ARCH_HAVE_LMF_OPS
11547 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11548 MONO_ADD_INS (bblock, ins);
11549 cfg->need_lmf_area = TRUE;
11553 case CEE_MONO_CLASSCONST:
11554 CHECK_STACK_OVF (1);
11556 token = read32 (ip + 2);
11557 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11560 inline_costs += 10 * num_calls++;
11562 case CEE_MONO_NOT_TAKEN:
11563 bblock->out_of_line = TRUE;
11566 case CEE_MONO_TLS: {
11569 CHECK_STACK_OVF (1);
11571 key = (gint32)read32 (ip + 2);
11572 g_assert (key < TLS_KEY_NUM);
11574 ins = mono_create_tls_get (cfg, key);
11576 if (cfg->compile_aot) {
11578 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11579 ins->dreg = alloc_preg (cfg);
11580 ins->type = STACK_PTR;
11582 g_assert_not_reached ();
11585 ins->type = STACK_PTR;
11586 MONO_ADD_INS (bblock, ins);
11591 case CEE_MONO_DYN_CALL: {
11592 MonoCallInst *call;
11594 /* It would be easier to call a trampoline, but that would put an
11595 * extra frame on the stack, confusing exception handling. So
11596 * implement it inline using an opcode for now.
11599 if (!cfg->dyn_call_var) {
11600 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11601 /* prevent it from being register allocated */
11602 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11605 /* Has to use a call inst since it local regalloc expects it */
11606 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11607 ins = (MonoInst*)call;
11609 ins->sreg1 = sp [0]->dreg;
11610 ins->sreg2 = sp [1]->dreg;
11611 MONO_ADD_INS (bblock, ins);
11613 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11616 inline_costs += 10 * num_calls++;
11620 case CEE_MONO_MEMORY_BARRIER: {
11622 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11626 case CEE_MONO_JIT_ATTACH: {
11627 MonoInst *args [16], *domain_ins;
11628 MonoInst *ad_ins, *jit_tls_ins;
11629 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11631 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11633 EMIT_NEW_PCONST (cfg, ins, NULL);
11634 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11636 ad_ins = mono_get_domain_intrinsic (cfg);
11637 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
11639 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
11640 NEW_BBLOCK (cfg, next_bb);
11641 NEW_BBLOCK (cfg, call_bb);
11643 if (cfg->compile_aot) {
11644 /* AOT code is only used in the root domain */
11645 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11647 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11649 MONO_ADD_INS (cfg->cbb, ad_ins);
11650 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11651 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11653 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
11654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11657 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11658 MONO_START_BB (cfg, call_bb);
11661 if (cfg->compile_aot) {
11662 /* AOT code is only used in the root domain */
11663 EMIT_NEW_PCONST (cfg, args [0], NULL);
11665 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11667 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11668 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11671 MONO_START_BB (cfg, next_bb);
11677 case CEE_MONO_JIT_DETACH: {
11678 MonoInst *args [16];
11680 /* Restore the original domain */
11681 dreg = alloc_ireg (cfg);
11682 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11683 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11688 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11694 case CEE_PREFIX1: {
11697 case CEE_ARGLIST: {
11698 /* somewhat similar to LDTOKEN */
11699 MonoInst *addr, *vtvar;
11700 CHECK_STACK_OVF (1);
11701 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11703 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11704 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11706 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11707 ins->type = STACK_VTYPE;
11708 ins->klass = mono_defaults.argumenthandle_class;
11721 * The following transforms:
11722 * CEE_CEQ into OP_CEQ
11723 * CEE_CGT into OP_CGT
11724 * CEE_CGT_UN into OP_CGT_UN
11725 * CEE_CLT into OP_CLT
11726 * CEE_CLT_UN into OP_CLT_UN
11728 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11730 MONO_INST_NEW (cfg, ins, cmp->opcode);
11732 cmp->sreg1 = sp [0]->dreg;
11733 cmp->sreg2 = sp [1]->dreg;
11734 type_from_op (cmp, sp [0], sp [1]);
11736 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11737 cmp->opcode = OP_LCOMPARE;
11738 else if (sp [0]->type == STACK_R8)
11739 cmp->opcode = OP_FCOMPARE;
11741 cmp->opcode = OP_ICOMPARE;
11742 MONO_ADD_INS (bblock, cmp);
11743 ins->type = STACK_I4;
11744 ins->dreg = alloc_dreg (cfg, ins->type);
11745 type_from_op (ins, sp [0], sp [1]);
11747 if (cmp->opcode == OP_FCOMPARE) {
11749 * The backends expect the fceq opcodes to do the
11752 ins->sreg1 = cmp->sreg1;
11753 ins->sreg2 = cmp->sreg2;
11756 MONO_ADD_INS (bblock, ins);
11762 MonoInst *argconst;
11763 MonoMethod *cil_method;
11765 CHECK_STACK_OVF (1);
11767 n = read32 (ip + 2);
11768 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11769 if (!cmethod || mono_loader_get_last_error ())
11771 mono_class_init (cmethod->klass);
11773 mono_save_token_info (cfg, image, n, cmethod);
11775 context_used = mini_method_check_context_used (cfg, cmethod);
11777 cil_method = cmethod;
11778 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11779 METHOD_ACCESS_FAILURE (method, cil_method);
11781 if (mono_security_cas_enabled ()) {
11782 if (check_linkdemand (cfg, method, cmethod))
11783 INLINE_FAILURE ("linkdemand");
11784 CHECK_CFG_EXCEPTION;
11785 } else if (mono_security_core_clr_enabled ()) {
11786 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11790 * Optimize the common case of ldftn+delegate creation
11792 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11793 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11794 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11795 MonoInst *target_ins, *handle_ins;
11796 MonoMethod *invoke;
11797 int invoke_context_used;
11799 invoke = mono_get_delegate_invoke (ctor_method->klass);
11800 if (!invoke || !mono_method_signature (invoke))
11803 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11805 target_ins = sp [-1];
11807 if (mono_security_core_clr_enabled ())
11808 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11810 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11811 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11812 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11814 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11818 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11819 /* FIXME: SGEN support */
11820 if (invoke_context_used == 0) {
11822 if (cfg->verbose_level > 3)
11823 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11824 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
11827 CHECK_CFG_EXCEPTION;
11838 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11839 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11843 inline_costs += 10 * num_calls++;
11846 case CEE_LDVIRTFTN: {
11847 MonoInst *args [2];
11851 n = read32 (ip + 2);
11852 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11853 if (!cmethod || mono_loader_get_last_error ())
11855 mono_class_init (cmethod->klass);
11857 context_used = mini_method_check_context_used (cfg, cmethod);
11859 if (mono_security_cas_enabled ()) {
11860 if (check_linkdemand (cfg, method, cmethod))
11861 INLINE_FAILURE ("linkdemand");
11862 CHECK_CFG_EXCEPTION;
11863 } else if (mono_security_core_clr_enabled ()) {
11864 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11868 * Optimize the common case of ldvirtftn+delegate creation
11870 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
11871 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11872 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11873 MonoInst *target_ins, *handle_ins;
11874 MonoMethod *invoke;
11875 int invoke_context_used;
11877 invoke = mono_get_delegate_invoke (ctor_method->klass);
11878 if (!invoke || !mono_method_signature (invoke))
11881 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11883 target_ins = sp [-1];
11885 if (mono_security_core_clr_enabled ())
11886 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11888 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11889 /* FIXME: SGEN support */
11890 if (invoke_context_used == 0) {
11892 if (cfg->verbose_level > 3)
11893 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11894 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
11897 CHECK_CFG_EXCEPTION;
11911 args [1] = emit_get_rgctx_method (cfg, context_used,
11912 cmethod, MONO_RGCTX_INFO_METHOD);
11915 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11917 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11920 inline_costs += 10 * num_calls++;
11924 CHECK_STACK_OVF (1);
11926 n = read16 (ip + 2);
11928 EMIT_NEW_ARGLOAD (cfg, ins, n);
11933 CHECK_STACK_OVF (1);
11935 n = read16 (ip + 2);
11937 NEW_ARGLOADA (cfg, ins, n);
11938 MONO_ADD_INS (cfg->cbb, ins);
11946 n = read16 (ip + 2);
11948 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11950 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11954 CHECK_STACK_OVF (1);
11956 n = read16 (ip + 2);
11958 EMIT_NEW_LOCLOAD (cfg, ins, n);
11963 unsigned char *tmp_ip;
11964 CHECK_STACK_OVF (1);
11966 n = read16 (ip + 2);
11969 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11975 EMIT_NEW_LOCLOADA (cfg, ins, n);
11984 n = read16 (ip + 2);
11986 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11988 emit_stloc_ir (cfg, sp, header, n);
11995 if (sp != stack_start)
11997 if (cfg->method != method)
11999 * Inlining this into a loop in a parent could lead to
12000 * stack overflows which is different behavior than the
12001 * non-inlined case, thus disable inlining in this case.
12003 INLINE_FAILURE("localloc");
12005 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12006 ins->dreg = alloc_preg (cfg);
12007 ins->sreg1 = sp [0]->dreg;
12008 ins->type = STACK_PTR;
12009 MONO_ADD_INS (cfg->cbb, ins);
12011 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12013 ins->flags |= MONO_INST_INIT;
12018 case CEE_ENDFILTER: {
12019 MonoExceptionClause *clause, *nearest;
12020 int cc, nearest_num;
12024 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12026 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12027 ins->sreg1 = (*sp)->dreg;
12028 MONO_ADD_INS (bblock, ins);
12029 start_new_bblock = 1;
12034 for (cc = 0; cc < header->num_clauses; ++cc) {
12035 clause = &header->clauses [cc];
12036 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12037 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12038 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12043 g_assert (nearest);
12044 if ((ip - header->code) != nearest->handler_offset)
12049 case CEE_UNALIGNED_:
12050 ins_flag |= MONO_INST_UNALIGNED;
12051 /* FIXME: record alignment? we can assume 1 for now */
12055 case CEE_VOLATILE_:
12056 ins_flag |= MONO_INST_VOLATILE;
12060 ins_flag |= MONO_INST_TAILCALL;
12061 cfg->flags |= MONO_CFG_HAS_TAIL;
12062 /* Can't inline tail calls at this time */
12063 inline_costs += 100000;
12070 token = read32 (ip + 2);
12071 klass = mini_get_class (method, token, generic_context);
12072 CHECK_TYPELOAD (klass);
12073 if (generic_class_is_reference_type (cfg, klass))
12074 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12076 mini_emit_initobj (cfg, *sp, NULL, klass);
12080 case CEE_CONSTRAINED_:
12082 token = read32 (ip + 2);
12083 constrained_call = mini_get_class (method, token, generic_context);
12084 CHECK_TYPELOAD (constrained_call);
12088 case CEE_INITBLK: {
12089 MonoInst *iargs [3];
12093 /* Skip optimized paths for volatile operations. */
12094 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12095 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12096 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12097 /* emit_memset only works when val == 0 */
12098 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12101 iargs [0] = sp [0];
12102 iargs [1] = sp [1];
12103 iargs [2] = sp [2];
12104 if (ip [1] == CEE_CPBLK) {
12106 * FIXME: It's unclear whether we should be emitting both the acquire
12107 * and release barriers for cpblk. It is technically both a load and
12108 * store operation, so it seems like that's the sensible thing to do.
12110 MonoMethod *memcpy_method = get_memcpy_method ();
12111 if (ins_flag & MONO_INST_VOLATILE) {
12112 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12113 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12114 emit_memory_barrier (cfg, FullBarrier);
12116 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12117 call->flags |= ins_flag;
12118 if (ins_flag & MONO_INST_VOLATILE) {
12119 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
12120 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12121 emit_memory_barrier (cfg, FullBarrier);
12124 MonoMethod *memset_method = get_memset_method ();
12125 if (ins_flag & MONO_INST_VOLATILE) {
12126 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12127 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12128 emit_memory_barrier (cfg, FullBarrier);
12130 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12131 call->flags |= ins_flag;
12142 ins_flag |= MONO_INST_NOTYPECHECK;
12144 ins_flag |= MONO_INST_NORANGECHECK;
12145 /* we ignore the no-nullcheck for now since we
12146 * really do it explicitly only when doing callvirt->call
12150 case CEE_RETHROW: {
12152 int handler_offset = -1;
12154 for (i = 0; i < header->num_clauses; ++i) {
12155 MonoExceptionClause *clause = &header->clauses [i];
12156 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12157 handler_offset = clause->handler_offset;
12162 bblock->flags |= BB_EXCEPTION_UNSAFE;
12164 if (handler_offset == -1)
12167 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12168 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12169 ins->sreg1 = load->dreg;
12170 MONO_ADD_INS (bblock, ins);
12172 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12173 MONO_ADD_INS (bblock, ins);
12176 link_bblock (cfg, bblock, end_bblock);
12177 start_new_bblock = 1;
12185 CHECK_STACK_OVF (1);
12187 token = read32 (ip + 2);
12188 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12189 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12192 val = mono_type_size (type, &ialign);
12194 MonoClass *klass = mini_get_class (method, token, generic_context);
12195 CHECK_TYPELOAD (klass);
12197 val = mono_type_size (&klass->byval_arg, &ialign);
12199 if (mini_is_gsharedvt_klass (cfg, klass))
12200 GSHAREDVT_FAILURE (*ip);
12202 EMIT_NEW_ICONST (cfg, ins, val);
12207 case CEE_REFANYTYPE: {
12208 MonoInst *src_var, *src;
12210 GSHAREDVT_FAILURE (*ip);
12216 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12218 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12219 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12220 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12225 case CEE_READONLY_:
12238 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12248 g_warning ("opcode 0x%02x not handled", *ip);
12252 if (start_new_bblock != 1)
12255 bblock->cil_length = ip - bblock->cil_code;
12256 if (bblock->next_bb) {
12257 /* This could already be set because of inlining, #693905 */
12258 MonoBasicBlock *bb = bblock;
12260 while (bb->next_bb)
12262 bb->next_bb = end_bblock;
12264 bblock->next_bb = end_bblock;
12267 if (cfg->method == method && cfg->domainvar) {
12269 MonoInst *get_domain;
12271 cfg->cbb = init_localsbb;
12273 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12274 MONO_ADD_INS (cfg->cbb, get_domain);
12276 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12278 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12279 MONO_ADD_INS (cfg->cbb, store);
12282 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12283 if (cfg->compile_aot)
12284 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12285 mono_get_got_var (cfg);
12288 if (cfg->method == method && cfg->got_var)
12289 mono_emit_load_got_addr (cfg);
12291 if (init_localsbb) {
12292 cfg->cbb = init_localsbb;
12294 for (i = 0; i < header->num_locals; ++i) {
12295 emit_init_local (cfg, i, header->locals [i], init_locals);
12299 if (cfg->init_ref_vars && cfg->method == method) {
12300 /* Emit initialization for ref vars */
12301 // FIXME: Avoid duplication initialization for IL locals.
12302 for (i = 0; i < cfg->num_varinfo; ++i) {
12303 MonoInst *ins = cfg->varinfo [i];
12305 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12306 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12310 if (cfg->lmf_var && cfg->method == method) {
12311 cfg->cbb = init_localsbb;
12312 emit_push_lmf (cfg);
12315 cfg->cbb = init_localsbb;
12316 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12319 MonoBasicBlock *bb;
12322 * Make seq points at backward branch targets interruptable.
12324 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12325 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12326 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12329 /* Add a sequence point for method entry/exit events */
12330 if (cfg->gen_seq_points_debug_data) {
12331 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12332 MONO_ADD_INS (init_localsbb, ins);
12333 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12334 MONO_ADD_INS (cfg->bb_exit, ins);
12338 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12339 * the code they refer to was dead (#11880).
12341 if (sym_seq_points) {
12342 for (i = 0; i < header->code_size; ++i) {
12343 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12346 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12347 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12354 if (cfg->method == method) {
12355 MonoBasicBlock *bb;
12356 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12357 bb->region = mono_find_block_region (cfg, bb->real_offset);
12359 mono_create_spvar_for_region (cfg, bb->region);
12360 if (cfg->verbose_level > 2)
12361 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12365 if (inline_costs < 0) {
12368 /* Method is too large */
12369 mname = mono_method_full_name (method, TRUE);
12370 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12371 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12375 if ((cfg->verbose_level > 2) && (cfg->method == method))
12376 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12381 g_assert (!mono_error_ok (&cfg->error));
12385 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12389 set_exception_type_from_invalid_il (cfg, method, ip);
12393 g_slist_free (class_inits);
12394 mono_basic_block_free (original_bb);
12395 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12396 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12397 if (cfg->exception_type)
12400 return inline_costs;
12404 store_membase_reg_to_store_membase_imm (int opcode)
12407 case OP_STORE_MEMBASE_REG:
12408 return OP_STORE_MEMBASE_IMM;
12409 case OP_STOREI1_MEMBASE_REG:
12410 return OP_STOREI1_MEMBASE_IMM;
12411 case OP_STOREI2_MEMBASE_REG:
12412 return OP_STOREI2_MEMBASE_IMM;
12413 case OP_STOREI4_MEMBASE_REG:
12414 return OP_STOREI4_MEMBASE_IMM;
12415 case OP_STOREI8_MEMBASE_REG:
12416 return OP_STOREI8_MEMBASE_IMM;
12418 g_assert_not_reached ();
12425 mono_op_to_op_imm (int opcode)
12429 return OP_IADD_IMM;
12431 return OP_ISUB_IMM;
12433 return OP_IDIV_IMM;
12435 return OP_IDIV_UN_IMM;
12437 return OP_IREM_IMM;
12439 return OP_IREM_UN_IMM;
12441 return OP_IMUL_IMM;
12443 return OP_IAND_IMM;
12447 return OP_IXOR_IMM;
12449 return OP_ISHL_IMM;
12451 return OP_ISHR_IMM;
12453 return OP_ISHR_UN_IMM;
12456 return OP_LADD_IMM;
12458 return OP_LSUB_IMM;
12460 return OP_LAND_IMM;
12464 return OP_LXOR_IMM;
12466 return OP_LSHL_IMM;
12468 return OP_LSHR_IMM;
12470 return OP_LSHR_UN_IMM;
12471 #if SIZEOF_REGISTER == 8
12473 return OP_LREM_IMM;
12477 return OP_COMPARE_IMM;
12479 return OP_ICOMPARE_IMM;
12481 return OP_LCOMPARE_IMM;
12483 case OP_STORE_MEMBASE_REG:
12484 return OP_STORE_MEMBASE_IMM;
12485 case OP_STOREI1_MEMBASE_REG:
12486 return OP_STOREI1_MEMBASE_IMM;
12487 case OP_STOREI2_MEMBASE_REG:
12488 return OP_STOREI2_MEMBASE_IMM;
12489 case OP_STOREI4_MEMBASE_REG:
12490 return OP_STOREI4_MEMBASE_IMM;
12492 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12494 return OP_X86_PUSH_IMM;
12495 case OP_X86_COMPARE_MEMBASE_REG:
12496 return OP_X86_COMPARE_MEMBASE_IMM;
12498 #if defined(TARGET_AMD64)
12499 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12500 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12502 case OP_VOIDCALL_REG:
12503 return OP_VOIDCALL;
12511 return OP_LOCALLOC_IMM;
12518 ldind_to_load_membase (int opcode)
12522 return OP_LOADI1_MEMBASE;
12524 return OP_LOADU1_MEMBASE;
12526 return OP_LOADI2_MEMBASE;
12528 return OP_LOADU2_MEMBASE;
12530 return OP_LOADI4_MEMBASE;
12532 return OP_LOADU4_MEMBASE;
12534 return OP_LOAD_MEMBASE;
12535 case CEE_LDIND_REF:
12536 return OP_LOAD_MEMBASE;
12538 return OP_LOADI8_MEMBASE;
12540 return OP_LOADR4_MEMBASE;
12542 return OP_LOADR8_MEMBASE;
12544 g_assert_not_reached ();
12551 stind_to_store_membase (int opcode)
12555 return OP_STOREI1_MEMBASE_REG;
12557 return OP_STOREI2_MEMBASE_REG;
12559 return OP_STOREI4_MEMBASE_REG;
12561 case CEE_STIND_REF:
12562 return OP_STORE_MEMBASE_REG;
12564 return OP_STOREI8_MEMBASE_REG;
12566 return OP_STORER4_MEMBASE_REG;
12568 return OP_STORER8_MEMBASE_REG;
12570 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-from-base-register opcode to a load-from-absolute-address
 * opcode (e.g. OP_LOAD_MEMBASE -> OP_LOAD_MEM), for targets whose
 * addressing modes can encode an absolute address directly.  Returns -1
 * when no such form exists (including on all non-x86/amd64 targets, where
 * the whole switch is compiled out).
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode and the store opcode that consumes its result,
 * return a combined read-modify-write opcode which operates directly on
 * the destination memory location (e.g. OP_IADD followed by an i4 store
 * becomes OP_X86_ADD_MEMBASE_REG).  Returns -1 when no combined form is
 * available for this target/opcode/store-width combination; callers then
 * keep the separate ALU + store sequence.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* On x86, only pointer-sized (== i4) stores can be fused. */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	}
#endif

#if defined(TARGET_AMD64)
	/* amd64 can fuse both 32 bit and 64 bit wide stores. */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	/* 32 bit ops reuse the x86 combined opcodes ... */
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	/* ... while 64 bit ops need the AMD64 specific ones. */
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Fuse a setcc-style opcode whose result is immediately stored with a
 * 1-byte store into a single SET*_MEMBASE opcode (x86/amd64 only).
 * NOTE(review): the case labels guarding each branch (presumably the
 * integer ceq/cne setcc opcodes) and the -1 fallback are not visible in
 * this excerpt — confirm against the full file.
 */
12699 op_to_op_store_membase (int store_opcode, int opcode)
12701 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12704 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12705 return OP_X86_SETEQ_MEMBASE;
12707 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12708 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Return an opcode which can replace 'opcode' when its first source
 * operand is loaded from a stack slot with 'load_opcode', folding the
 * load into the instruction (x86/amd64 only). Presumably returns -1 when
 * no folding is possible (fallback not visible in this excerpt).
 * NOTE(review): several case labels and early-out returns are missing
 * from this excerpt — confirm against the full file.
 */
12716 op_to_op_src1_membase (int load_opcode, int opcode)
12719 /* FIXME: This has sign extension issues */
/* byte-sized compare-with-immediate can use the 8-bit memory compare */
12721 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12722 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only pointer-sized / 32-bit loads can be folded on x86 */
12725 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12730 return OP_X86_PUSH_MEMBASE;
12731 case OP_COMPARE_IMM:
12732 case OP_ICOMPARE_IMM:
12733 return OP_X86_COMPARE_MEMBASE_IMM;
12736 return OP_X86_COMPARE_MEMBASE_REG;
12740 #ifdef TARGET_AMD64
12741 /* FIXME: This has sign extension issues */
12743 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12744 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32) pointers are 4 bytes, so 8-byte loads are special-cased */
12749 #ifdef __mono_ilp32__
12750 if (load_opcode == OP_LOADI8_MEMBASE)
12752 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12754 return OP_X86_PUSH_MEMBASE;
12756 /* FIXME: This only works for 32 bit immediates
12757 case OP_COMPARE_IMM:
12758 case OP_LCOMPARE_IMM:
12759 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12760 return OP_AMD64_COMPARE_MEMBASE_IMM;
12762 case OP_ICOMPARE_IMM:
12763 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12764 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12768 #ifdef __mono_ilp32__
12769 if (load_opcode == OP_LOAD_MEMBASE)
12770 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12771 if (load_opcode == OP_LOADI8_MEMBASE)
12773 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12775 return OP_AMD64_COMPARE_MEMBASE_REG;
12778 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12779 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Return an opcode which can replace 'opcode' when its second source
 * operand is loaded from a stack slot with 'load_opcode', folding the
 * load into the instruction (x86/amd64 only). Presumably returns -1 when
 * no folding is possible (fallback not visible in this excerpt).
 * NOTE(review): the case labels selecting each return are missing from
 * this excerpt — confirm against the full file.
 */
12788 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32-bit loads fold */
12791 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12797 return OP_X86_COMPARE_REG_MEMBASE;
12799 return OP_X86_ADD_REG_MEMBASE;
12801 return OP_X86_SUB_REG_MEMBASE;
12803 return OP_X86_AND_REG_MEMBASE;
12805 return OP_X86_OR_REG_MEMBASE;
12807 return OP_X86_XOR_REG_MEMBASE;
12811 #ifdef TARGET_AMD64
/* under ILP32, OP_LOAD_MEMBASE is 4 bytes and joins the 32-bit group */
12812 #ifdef __mono_ilp32__
12813 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12815 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32-bit operand forms reuse the X86_ opcodes */
12819 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12821 return OP_X86_ADD_REG_MEMBASE;
12823 return OP_X86_SUB_REG_MEMBASE;
12825 return OP_X86_AND_REG_MEMBASE;
12827 return OP_X86_OR_REG_MEMBASE;
12829 return OP_X86_XOR_REG_MEMBASE;
12831 #ifdef __mono_ilp32__
12832 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12834 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64-bit operand forms use the dedicated AMD64_ opcodes */
12839 return OP_AMD64_COMPARE_REG_MEMBASE;
12841 return OP_AMD64_ADD_REG_MEMBASE;
12843 return OP_AMD64_SUB_REG_MEMBASE;
12845 return OP_AMD64_AND_REG_MEMBASE;
12847 return OP_AMD64_OR_REG_MEMBASE;
12849 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes
 * whose immediate form would have to be software-emulated on this target
 * (long shifts on 32-bit registers, mul/div when the arch emulates them).
 * NOTE(review): the case labels filtered by each #if block, and the
 * rejecting return, are not visible in this excerpt — confirm against
 * the full file.
 */
12858 mono_op_to_op_imm_noemul (int opcode)
12861 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12867 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12874 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* everything else can use the normal immediate mapping */
12879 return mono_op_to_op_imm (opcode);
12884 * mono_handle_global_vregs:
12886 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * for them, and conversely turn variables only touched inside a single
 * bblock back into plain local vregs so the local register allocator can
 * handle them. Finally compress cfg->varinfo/cfg->vars to drop dead entries.
 */
12890 mono_handle_global_vregs (MonoCompile *cfg)
12892 gint32 *vreg_to_bb;
12893 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds 0 (unseen), block_num + 1 (seen in one bb),
 * or -1 (seen in multiple bbs).
 * NOTE(review): the allocation below uses sizeof (gint32*) for an array
 * of gint32, and the '+ 1' is outside the multiply, so this over-allocates
 * per element but adds only one extra byte, not one extra slot — looks
 * like a latent bug inherited from upstream; harmless in practice on
 * 64-bit but worth confirming/fixing in the full source.
 */
12896 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12898 #ifdef MONO_ARCH_SIMD_INTRINSICS
12899 if (cfg->uses_simd_intrinsics)
12900 mono_simd_simplify_indirection (cfg);
12903 /* Find local vregs used in more than one bb */
12904 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12905 MonoInst *ins = bb->code;
12906 int block_num = bb->block_num;
12908 if (cfg->verbose_level > 2)
12909 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12912 for (; ins; ins = ins->next) {
12913 const char *spec = INS_INFO (ins->opcode);
12914 int regtype = 0, regindex;
12917 if (G_UNLIKELY (cfg->verbose_level > 2))
12918 mono_print_ins (ins);
/* only machine IR is expected at this point, no CIL opcodes */
12920 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn, skipping unused slots */
12922 for (regindex = 0; regindex < 4; regindex ++) {
12925 if (regindex == 0) {
12926 regtype = spec [MONO_INST_DEST];
12927 if (regtype == ' ')
12930 } else if (regindex == 1) {
12931 regtype = spec [MONO_INST_SRC1];
12932 if (regtype == ' ')
12935 } else if (regindex == 2) {
12936 regtype = spec [MONO_INST_SRC2];
12937 if (regtype == ' ')
12940 } else if (regindex == 3) {
12941 regtype = spec [MONO_INST_SRC3];
12942 if (regtype == ' ')
12947 #if SIZEOF_REGISTER == 4
12948 /* In the LLVM case, the long opcodes are not decomposed */
12949 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12951 * Since some instructions reference the original long vreg,
12952 * and some reference the two component vregs, it is quite hard
12953 * to determine when it needs to be global. So be conservative.
12955 if (!get_vreg_to_inst (cfg, vreg)) {
12956 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12958 if (cfg->verbose_level > 2)
12959 printf ("LONG VREG R%d made global.\n", vreg);
12963 * Make the component vregs volatile since the optimizations can
12964 * get confused otherwise.
/* vreg + 1 / vreg + 2 are the low/high word component vregs of a long */
12966 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12967 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12971 g_assert (vreg != -1);
12973 prev_bb = vreg_to_bb [vreg];
12974 if (prev_bb == 0) {
12975 /* 0 is a valid block num */
12976 vreg_to_bb [vreg] = block_num + 1;
12977 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers are never made global */
12978 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12981 if (!get_vreg_to_inst (cfg, vreg)) {
12982 if (G_UNLIKELY (cfg->verbose_level > 2))
12983 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* allocate a variable of the matching stack type for the vreg */
12987 if (vreg_is_ref (cfg, vreg))
12988 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12990 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12993 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12996 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12999 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13002 g_assert_not_reached ();
13006 /* Flag as having been used in more than one bb */
13007 vreg_to_bb [vreg] = -1;
13013 /* If a variable is used in only one bblock, convert it into a local vreg */
13014 for (i = 0; i < cfg->num_varinfo; i++) {
13015 MonoInst *var = cfg->varinfo [i];
13016 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13018 switch (var->type) {
13024 #if SIZEOF_REGISTER == 8
13027 #if !defined(TARGET_X86)
13028 /* Enabling this screws up the fp stack on x86 */
13031 if (mono_arch_is_soft_float ())
13034 /* Arguments are implicitly global */
13035 /* Putting R4 vars into registers doesn't work currently */
13036 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13037 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13039 * Make that the variable's liveness interval doesn't contain a call, since
13040 * that would cause the lvreg to be spilled, making the whole optimization
13043 /* This is too slow for JIT compilation */
13045 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13047 int def_index, call_index, ins_index;
13048 gboolean spilled = FALSE;
13053 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13054 const char *spec = INS_INFO (ins->opcode);
13056 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13057 def_index = ins_index;
/*
 * NOTE(review): both arms of this || test SRC1/sreg1; the second arm
 * presumably should test SRC2/sreg2 — looks like a copy-paste bug,
 * confirm against upstream before relying on this check.
 */
13059 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13060 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13061 if (call_index > def_index) {
13067 if (MONO_IS_CALL (ins))
13068 call_index = ins_index;
13078 if (G_UNLIKELY (cfg->verbose_level > 2))
13079 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* kill the variable; the vreg stays purely local from now on */
13080 var->flags |= MONO_INST_IS_DEAD;
13081 cfg->vreg_to_inst [var->dreg] = NULL;
13088 * Compress the varinfo and vars tables so the liveness computation is faster and
13089 * takes up less space.
13092 for (i = 0; i < cfg->num_varinfo; ++i) {
13093 MonoInst *var = cfg->varinfo [i];
13094 if (pos < i && cfg->locals_start == i)
13095 cfg->locals_start = pos;
13096 if (!(var->flags & MONO_INST_IS_DEAD)) {
13098 cfg->varinfo [pos] = cfg->varinfo [i];
13099 cfg->varinfo [pos]->inst_c0 = pos;
13100 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13101 cfg->vars [pos].idx = pos;
13102 #if SIZEOF_REGISTER == 4
13103 if (cfg->varinfo [pos]->type == STACK_I8) {
13104 /* Modify the two component vars too */
13107 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13108 var1->inst_c0 = pos;
13109 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13110 var1->inst_c0 = pos;
13117 cfg->num_varinfo = pos;
13118 if (cfg->locals_start > cfg->num_varinfo)
13119 cfg->locals_start = cfg->num_varinfo;
13123 * mono_spill_global_vars:
13125 * Generate spill code for variables which are not allocated to registers,
13126 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13127 * code is generated which could be optimized by the local optimization passes.
/*
 * Also decomposes OP_LDADDR, fuses loads/stores into *_MEMBASE forms where
 * the target supports it, caches spilled values in 'lvregs', and records
 * instruction-precise live ranges for precise debug/GC info.
 */
13130 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13132 MonoBasicBlock *bb;
13134 int orig_next_vreg;
13135 guint32 *vreg_to_lvreg;
13137 guint32 i, lvregs_len;
13138 gboolean dest_has_lvreg = FALSE;
13139 guint32 stacktypes [128];
13140 MonoInst **live_range_start, **live_range_end;
13141 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13142 int *gsharedvt_vreg_to_idx = NULL;
13144 *need_local_opts = FALSE;
13146 memset (spec2, 0, sizeof (spec2));
13148 /* FIXME: Move this function to mini.c */
/* map the INS_INFO () spec chars to stack types for alloc_dreg () */
13149 stacktypes ['i'] = STACK_PTR;
13150 stacktypes ['l'] = STACK_I8;
13151 stacktypes ['f'] = STACK_R8;
13152 #ifdef MONO_ARCH_SIMD_INTRINSICS
13153 stacktypes ['x'] = STACK_VTYPE;
13156 #if SIZEOF_REGISTER == 4
13157 /* Create MonoInsts for longs */
13158 for (i = 0; i < cfg->num_varinfo; i++) {
13159 MonoInst *ins = cfg->varinfo [i];
13161 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13162 switch (ins->type) {
13167 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13170 g_assert (ins->opcode == OP_REGOFFSET);
/* give the low/high component vregs their own stack slots */
13172 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13174 tree->opcode = OP_REGOFFSET;
13175 tree->inst_basereg = ins->inst_basereg;
13176 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13178 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13180 tree->opcode = OP_REGOFFSET;
13181 tree->inst_basereg = ins->inst_basereg;
13182 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13192 if (cfg->compute_gc_maps) {
13193 /* registers need liveness info even for !non refs */
13194 for (i = 0; i < cfg->num_varinfo; i++) {
13195 MonoInst *ins = cfg->varinfo [i];
13197 if (ins->opcode == OP_REGVAR)
13198 ins->flags |= MONO_INST_GC_TRACK;
/*
 * gsharedvt: map each variable-sized local/arg to an info-table index
 * (idx + 1 for locals, -1 for by-ref args) so OP_LDADDR can be lowered
 * into address computations below.
 */
13202 if (cfg->gsharedvt) {
13203 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13205 for (i = 0; i < cfg->num_varinfo; ++i) {
13206 MonoInst *ins = cfg->varinfo [i];
13209 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13210 if (i >= cfg->locals_start) {
13212 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13213 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13214 ins->opcode = OP_GSHAREDVT_LOCAL;
13215 ins->inst_imm = idx;
13218 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13219 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13225 /* FIXME: widening and truncation */
13228 * As an optimization, when a variable allocated to the stack is first loaded into
13229 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13230 * the variable again.
13232 orig_next_vreg = cfg->next_vreg;
13233 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13234 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13238 * These arrays contain the first and last instructions accessing a given
13240 * Since we emit bblocks in the same order we process them here, and we
13241 * don't split live ranges, these will precisely describe the live range of
13242 * the variable, i.e. the instruction range where a valid value can be found
13243 * in the variables location.
13244 * The live range is computed using the liveness info computed by the liveness pass.
13245 * We can't use vmv->range, since that is an abstract live range, and we need
13246 * one which is instruction precise.
13247 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13249 /* FIXME: Only do this if debugging info is requested */
13250 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13251 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13252 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13253 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13255 /* Add spill loads/stores */
13256 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13259 if (cfg->verbose_level > 2)
13260 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13262 /* Clear vreg_to_lvreg array */
/* lvreg caching is per-bblock; reset the entries touched so far */
13263 for (i = 0; i < lvregs_len; i++)
13264 vreg_to_lvreg [lvregs [i]] = 0;
13268 MONO_BB_FOR_EACH_INS (bb, ins) {
13269 const char *spec = INS_INFO (ins->opcode);
13270 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13271 gboolean store, no_lvreg;
13272 int sregs [MONO_MAX_SRC_REGS];
13274 if (G_UNLIKELY (cfg->verbose_level > 2))
13275 mono_print_ins (ins);
13277 if (ins->opcode == OP_NOP)
13281 * We handle LDADDR here as well, since it can only be decomposed
13282 * when variable addresses are known.
13284 if (ins->opcode == OP_LDADDR) {
13285 MonoInst *var = ins->inst_p0;
13287 if (var->opcode == OP_VTARG_ADDR) {
13288 /* Happens on SPARC/S390 where vtypes are passed by reference */
13289 MonoInst *vtaddr = var->inst_left;
13290 if (vtaddr->opcode == OP_REGVAR) {
13291 ins->opcode = OP_MOVE;
13292 ins->sreg1 = vtaddr->dreg;
13294 else if (var->inst_left->opcode == OP_REGOFFSET) {
13295 ins->opcode = OP_LOAD_MEMBASE;
13296 ins->inst_basereg = vtaddr->inst_basereg;
13297 ins->inst_offset = vtaddr->inst_offset;
13300 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13301 /* gsharedvt arg passed by ref */
13302 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13304 ins->opcode = OP_LOAD_MEMBASE;
13305 ins->inst_basereg = var->inst_basereg;
13306 ins->inst_offset = var->inst_offset;
13307 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13308 MonoInst *load, *load2, *load3;
13309 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13310 int reg1, reg2, reg3;
13311 MonoInst *info_var = cfg->gsharedvt_info_var;
13312 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13316 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13319 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13321 g_assert (info_var);
13322 g_assert (locals_var);
13324 /* Mark the instruction used to compute the locals var as used */
13325 cfg->gsharedvt_locals_var_ins = NULL;
13327 /* Load the offset */
13328 if (info_var->opcode == OP_REGOFFSET) {
13329 reg1 = alloc_ireg (cfg);
13330 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13331 } else if (info_var->opcode == OP_REGVAR) {
13333 reg1 = info_var->dreg;
13335 g_assert_not_reached ();
13337 reg2 = alloc_ireg (cfg);
13338 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13339 /* Load the locals area address */
13340 reg3 = alloc_ireg (cfg);
13341 if (locals_var->opcode == OP_REGOFFSET) {
13342 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13343 } else if (locals_var->opcode == OP_REGVAR) {
13344 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13346 g_assert_not_reached ();
13348 /* Compute the address */
13349 ins->opcode = OP_PADD;
/* insert the three loads before the (rewritten) LDADDR */
13353 mono_bblock_insert_before_ins (bb, ins, load3);
13354 mono_bblock_insert_before_ins (bb, load3, load2);
13356 mono_bblock_insert_before_ins (bb, load2, load);
13358 g_assert (var->opcode == OP_REGOFFSET);
/* the common case: address = frame base + offset */
13360 ins->opcode = OP_ADD_IMM;
13361 ins->sreg1 = var->inst_basereg;
13362 ins->inst_imm = var->inst_offset;
13365 *need_local_opts = TRUE;
13366 spec = INS_INFO (ins->opcode);
/* CIL opcodes must all have been lowered by now */
13369 if (ins->opcode < MONO_CEE_LAST) {
13370 mono_print_ins (ins);
13371 g_assert_not_reached ();
13375 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ...extra source; temporarily swap dreg/sreg2 and use a fixed-up spec2 */
13379 if (MONO_IS_STORE_MEMBASE (ins)) {
13380 tmp_reg = ins->dreg;
13381 ins->dreg = ins->sreg2;
13382 ins->sreg2 = tmp_reg;
13385 spec2 [MONO_INST_DEST] = ' ';
13386 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13387 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13388 spec2 [MONO_INST_SRC3] = ' ';
13390 } else if (MONO_IS_STORE_MEMINDEX (ins))
13391 g_assert_not_reached ();
13396 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13397 printf ("\t %.3s %d", spec, ins->dreg);
13398 num_sregs = mono_inst_get_src_registers (ins, sregs);
13399 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13400 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
13407 regtype = spec [MONO_INST_DEST];
13408 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13411 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13412 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13413 MonoInst *store_ins;
13415 MonoInst *def_ins = ins;
13416 int dreg = ins->dreg; /* The original vreg */
13418 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13420 if (var->opcode == OP_REGVAR) {
13421 ins->dreg = var->dreg;
13422 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13424 * Instead of emitting a load+store, use a _membase opcode.
13426 g_assert (var->opcode == OP_REGOFFSET);
13427 if (ins->opcode == OP_MOVE) {
13431 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13432 ins->inst_basereg = var->inst_basereg;
13433 ins->inst_offset = var->inst_offset;
13436 spec = INS_INFO (ins->opcode);
13440 g_assert (var->opcode == OP_REGOFFSET);
13442 prev_dreg = ins->dreg;
13444 /* Invalidate any previous lvreg for this vreg */
13445 vreg_to_lvreg [ins->dreg] = 0;
13449 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13451 store_opcode = OP_STOREI8_MEMBASE_REG;
/* redirect the def to a fresh lvreg, then spill it to the stack slot */
13454 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13456 #if SIZEOF_REGISTER != 8
13457 if (regtype == 'l') {
13458 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13459 mono_bblock_insert_after_ins (bb, ins, store_ins);
13460 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13461 mono_bblock_insert_after_ins (bb, ins, store_ins);
13462 def_ins = store_ins;
13467 g_assert (store_opcode != OP_STOREV_MEMBASE);
13469 /* Try to fuse the store into the instruction itself */
13470 /* FIXME: Add more instructions */
13471 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13472 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13473 ins->inst_imm = ins->inst_c0;
13474 ins->inst_destbasereg = var->inst_basereg;
13475 ins->inst_offset = var->inst_offset;
13476 spec = INS_INFO (ins->opcode);
13477 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13478 ins->opcode = store_opcode;
13479 ins->inst_destbasereg = var->inst_basereg;
13480 ins->inst_offset = var->inst_offset;
/* the instruction became a store: swap regs and fix the spec again */
13484 tmp_reg = ins->dreg;
13485 ins->dreg = ins->sreg2;
13486 ins->sreg2 = tmp_reg;
13489 spec2 [MONO_INST_DEST] = ' ';
13490 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13491 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13492 spec2 [MONO_INST_SRC3] = ' ';
13494 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13495 // FIXME: The backends expect the base reg to be in inst_basereg
13496 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13498 ins->inst_basereg = var->inst_basereg;
13499 ins->inst_offset = var->inst_offset;
13500 spec = INS_INFO (ins->opcode);
13502 /* printf ("INS: "); mono_print_ins (ins); */
13503 /* Create a store instruction */
13504 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13506 /* Insert it after the instruction */
13507 mono_bblock_insert_after_ins (bb, ins, store_ins);
13509 def_ins = store_ins;
13512 * We can't assign ins->dreg to var->dreg here, since the
13513 * sregs could use it. So set a flag, and do it after
/* fp-stack targets can't cache float lvregs; volatiles never cache */
13516 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13517 dest_has_lvreg = TRUE;
13522 if (def_ins && !live_range_start [dreg]) {
13523 live_range_start [dreg] = def_ins;
13524 live_range_start_bb [dreg] = bb;
13527 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13530 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13531 tmp->inst_c1 = dreg;
13532 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
13539 num_sregs = mono_inst_get_src_registers (ins, sregs);
13540 for (srcindex = 0; srcindex < 3; ++srcindex) {
13541 regtype = spec [MONO_INST_SRC1 + srcindex];
13542 sreg = sregs [srcindex];
13544 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13545 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13546 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13547 MonoInst *use_ins = ins;
13548 MonoInst *load_ins;
13549 guint32 load_opcode;
13551 if (var->opcode == OP_REGVAR) {
13552 sregs [srcindex] = var->dreg;
13553 //mono_inst_set_src_registers (ins, sregs);
13554 live_range_end [sreg] = use_ins;
13555 live_range_end_bb [sreg] = bb;
13557 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13560 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13561 /* var->dreg is a hreg */
13562 tmp->inst_c1 = sreg;
13563 mono_bblock_insert_after_ins (bb, ins, tmp);
13569 g_assert (var->opcode == OP_REGOFFSET);
13571 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13573 g_assert (load_opcode != OP_LOADV_MEMBASE);
13575 if (vreg_to_lvreg [sreg]) {
13576 g_assert (vreg_to_lvreg [sreg] != -1);
13578 /* The variable is already loaded to an lvreg */
13579 if (G_UNLIKELY (cfg->verbose_level > 2))
13580 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13581 sregs [srcindex] = vreg_to_lvreg [sreg];
13582 //mono_inst_set_src_registers (ins, sregs);
13586 /* Try to fuse the load into the instruction */
13587 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13588 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13589 sregs [0] = var->inst_basereg;
13590 //mono_inst_set_src_registers (ins, sregs);
13591 ins->inst_offset = var->inst_offset;
13592 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13593 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13594 sregs [1] = var->inst_basereg;
13595 //mono_inst_set_src_registers (ins, sregs);
13596 ins->inst_offset = var->inst_offset;
13598 if (MONO_IS_REAL_MOVE (ins)) {
13599 ins->opcode = OP_NOP;
13602 //printf ("%d ", srcindex); mono_print_ins (ins);
/* load the variable into a fresh lvreg and cache it when allowed */
13604 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13606 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13607 if (var->dreg == prev_dreg) {
13609 * sreg refers to the value loaded by the load
13610 * emitted below, but we need to use ins->dreg
13611 * since it refers to the store emitted earlier.
13615 g_assert (sreg != -1);
13616 vreg_to_lvreg [var->dreg] = sreg;
13617 g_assert (lvregs_len < 1024);
13618 lvregs [lvregs_len ++] = var->dreg;
13622 sregs [srcindex] = sreg;
13623 //mono_inst_set_src_registers (ins, sregs);
13625 #if SIZEOF_REGISTER != 8
13626 if (regtype == 'l') {
13627 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13628 mono_bblock_insert_before_ins (bb, ins, load_ins);
13629 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13630 mono_bblock_insert_before_ins (bb, ins, load_ins);
13631 use_ins = load_ins;
13636 #if SIZEOF_REGISTER == 4
13637 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13639 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13640 mono_bblock_insert_before_ins (bb, ins, load_ins);
13641 use_ins = load_ins;
13645 if (var->dreg < orig_next_vreg) {
13646 live_range_end [var->dreg] = use_ins;
13647 live_range_end_bb [var->dreg] = bb;
13650 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13653 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13654 tmp->inst_c1 = var->dreg;
13655 mono_bblock_insert_after_ins (bb, ins, tmp);
13659 mono_inst_set_src_registers (ins, sregs);
/* deferred from the dreg handling above: now sregs are done, cache dreg */
13661 if (dest_has_lvreg) {
13662 g_assert (ins->dreg != -1);
13663 vreg_to_lvreg [prev_dreg] = ins->dreg;
13664 g_assert (lvregs_len < 1024);
13665 lvregs [lvregs_len ++] = prev_dreg;
13666 dest_has_lvreg = FALSE;
/* undo the store dreg/sreg2 swap performed earlier */
13670 tmp_reg = ins->dreg;
13671 ins->dreg = ins->sreg2;
13672 ins->sreg2 = tmp_reg;
/* calls clobber the lvreg cache: every cached value may be spilled */
13675 if (MONO_IS_CALL (ins)) {
13676 /* Clear vreg_to_lvreg array */
13677 for (i = 0; i < lvregs_len; i++)
13678 vreg_to_lvreg [lvregs [i]] = 0;
13680 } else if (ins->opcode == OP_NOP) {
13682 MONO_INST_NULLIFY_SREGS (ins);
13685 if (cfg->verbose_level > 2)
13686 mono_print_ins_index (1, ins);
13689 /* Extend the live range based on the liveness info */
13690 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13691 for (i = 0; i < cfg->num_varinfo; i ++) {
13692 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13694 if (vreg_is_volatile (cfg, vi->vreg))
13695 /* The liveness info is incomplete */
13698 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13699 /* Live from at least the first ins of this bb */
13700 live_range_start [vi->vreg] = bb->code;
13701 live_range_start_bb [vi->vreg] = bb;
13704 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13705 /* Live at least until the last ins of this bb */
13706 live_range_end [vi->vreg] = bb->last_ins;
13707 live_range_end_bb [vi->vreg] = bb;
13713 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13715 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13716 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13718 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13719 for (i = 0; i < cfg->num_varinfo; ++i) {
13720 int vreg = MONO_VARINFO (cfg, i)->vreg;
13723 if (live_range_start [vreg]) {
13724 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13726 ins->inst_c1 = vreg;
13727 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13729 if (live_range_end [vreg]) {
13730 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13732 ins->inst_c1 = vreg;
13733 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13734 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13736 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13742 if (cfg->gsharedvt_locals_var_ins) {
13743 /* Nullify if unused */
13744 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13745 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13748 g_free (live_range_start);
13749 g_free (live_range_end);
13750 g_free (live_range_start_bb);
13751 g_free (live_range_end_bb);
13756 * - use 'iadd' instead of 'int_add'
13757 * - handling ovf opcodes: decompose in method_to_ir.
13758 * - unify iregs/fregs
13759 * -> partly done, the missing parts are:
13760 * - a more complete unification would involve unifying the hregs as well, so
13761 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13762 * would no longer map to the machine hregs, so the code generators would need to
13763 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13764 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13765 * fp/non-fp branches speeds it up by about 15%.
13766 * - use sext/zext opcodes instead of shifts
13768 * - get rid of TEMPLOADs if possible and use vregs instead
13769 * - clean up usage of OP_P/OP_ opcodes
13770 * - cleanup usage of DUMMY_USE
13771 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13773 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13774 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13775 * - make sure handle_stack_args () is called before the branch is emitted
13776 * - when the new IR is done, get rid of all unused stuff
13777 * - COMPARE/BEQ as separate instructions or unify them ?
13778 * - keeping them separate allows specialized compare instructions like
13779 * compare_imm, compare_membase
13780 * - most back ends unify fp compare+branch, fp compare+ceq
13781 * - integrate mono_save_args into inline_method
13782 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13783 * - handle long shift opts on 32 bit platforms somehow: they require
13784 * 3 sregs (2 for arg1 and 1 for arg2)
13785 * - make byref a 'normal' type.
13786 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13787 * variable if needed.
13788 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13789 * like inline_method.
13790 * - remove inlining restrictions
13791 * - fix LNEG and enable cfold of INEG
13792 * - generalize x86 optimizations like ldelema as a peephole optimization
13793 * - add store_mem_imm for amd64
13794 * - optimize the loading of the interruption flag in the managed->native wrappers
13795 * - avoid special handling of OP_NOP in passes
13796 * - move code inserting instructions into one function/macro.
13797 * - try a coalescing phase after liveness analysis
13798 * - add float -> vreg conversion + local optimizations on !x86
13799 * - figure out how to handle decomposed branches during optimizations, ie.
13800 * compare+branch, op_jump_table+op_br etc.
13801 * - promote RuntimeXHandles to vregs
13802 * - vtype cleanups:
13803 * - add a NEW_VARLOADA_VREG macro
13804 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13805 * accessing vtype fields.
13806 * - get rid of I8CONST on 64 bit platforms
13807 * - dealing with the increase in code size due to branches created during opcode
13809 * - use extended basic blocks
13810 * - all parts of the JIT
13811 * - handle_global_vregs () && local regalloc
13812 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13813 * - sources of increase in code size:
13816 * - isinst and castclass
13817 * - lvregs not allocated to global registers even if used multiple times
13818 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13820 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13821 * - add all micro optimizations from the old JIT
13822 * - put tree optimizations into the deadce pass
13823 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13824 * specific function.
13825 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13826 * fcompare + branchCC.
13827 * - create a helper function for allocating a stack slot, taking into account
13828 * MONO_CFG_HAS_SPILLUP.
13830 * - merge the ia64 switch changes.
13831 * - optimize mono_regstate2_alloc_int/float.
13832 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13833 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13834 * parts of the tree could be separated by other instructions, killing the tree
13835 * arguments, or stores killing loads etc. Also, should we fold loads into other
13836 * instructions if the result of the load is used multiple times ?
13837 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13838 * - LAST MERGE: 108395.
13839 * - when returning vtypes in registers, generate IR and append it to the end of the
13840 * last bb instead of doing it in the epilog.
13841 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13849 - When to decompose opcodes:
13850 - earlier: this makes some optimizations hard to implement, since the low level IR
13851 no longer contains the necessary information. But it is easier to do.
13852 - later: harder to implement, enables more optimizations.
13853 - Branches inside bblocks:
13854 - created when decomposing complex opcodes.
13855 - branches to another bblock: harmless, but not tracked by the branch
13856 optimizations, so need to branch to a label at the start of the bblock.
13857 - branches to inside the same bblock: very problematic, trips up the local
13858 reg allocator. Can be fixed by splitting the current bblock, but that is a
13859 complex operation, since some local vregs can become global vregs etc.
13860 - Local/global vregs:
13861 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13862 local register allocator.
13863 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13864 structure, created by mono_create_var (). Assigned to hregs or the stack by
13865 the global register allocator.
13866 - When to do optimizations like alu->alu_imm:
13867 - earlier -> saves work later on since the IR will be smaller/simpler
13868 - later -> can work on more instructions
13869 - Handling of valuetypes:
13870 - When a vtype is pushed on the stack, a new temporary is created, an
13871 instruction computing its address (LDADDR) is emitted and pushed on
13872 the stack. Need to optimize cases when the vtype is used immediately as in
13873 argument passing, stloc etc.
13874 - Instead of the to_end stuff in the old JIT, simply call the function handling
13875 the values on the stack before emitting the last instruction of the bb.
13878 #endif /* DISABLE_JIT */