2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whenever 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
159 * Instruction metadata
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
199 #define MONO_INIT_VARINFO(vi,id) do { \
200 (vi)->range.first_use.pos.bid = 0xffff; \
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_replace_type (type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
368 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
371 static MONO_NEVER_INLINE void
372 break_on_unverified (void)
374 if (mini_get_debug_options ()->break_on_unverified)
378 static MONO_NEVER_INLINE void
379 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
381 char *method_fname = mono_method_full_name (method, TRUE);
382 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
384 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
385 g_free (method_fname);
386 g_free (cil_method_fname);
389 static MONO_NEVER_INLINE void
390 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
392 char *method_fname = mono_method_full_name (method, TRUE);
393 char *field_fname = mono_field_full_name (field);
394 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
395 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
396 g_free (method_fname);
397 g_free (field_fname);
400 static MONO_NEVER_INLINE void
401 inline_failure (MonoCompile *cfg, const char *msg)
403 if (cfg->verbose_level >= 2)
404 printf ("inline failed: %s\n", msg);
405 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
408 static MONO_NEVER_INLINE void
409 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
411 if (cfg->verbose_level > 2) \
412 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
413 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
416 static MONO_NEVER_INLINE void
417 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
419 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
420 if (cfg->verbose_level >= 2)
421 printf ("%s\n", cfg->exception_message);
422 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
426 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
427 * foo<T> (int i) { ldarg.0; box T; }
429 #define UNVERIFIED do { \
430 if (cfg->gsharedvt) { \
431 if (cfg->verbose_level > 2) \
432 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
434 goto exception_exit; \
436 break_on_unverified (); \
440 #define GET_BBLOCK(cfg,tblock,ip) do { \
441 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
443 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
444 NEW_BBLOCK (cfg, (tblock)); \
445 (tblock)->cil_code = (ip); \
446 ADD_BBLOCK (cfg, (tblock)); \
450 #if defined(TARGET_X86) || defined(TARGET_AMD64)
451 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
452 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
453 (dest)->dreg = alloc_ireg_mp ((cfg)); \
454 (dest)->sreg1 = (sr1); \
455 (dest)->sreg2 = (sr2); \
456 (dest)->inst_imm = (imm); \
457 (dest)->backend.shift_amount = (shift); \
458 MONO_ADD_INS ((cfg)->cbb, (dest)); \
462 #if SIZEOF_REGISTER == 8
463 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
464 /* FIXME: Need to add many more cases */ \
465 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
467 int dr = alloc_preg (cfg); \
468 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
469 (ins)->sreg2 = widen->dreg; \
473 #define ADD_WIDEN_OP(ins, arg1, arg2)
476 #define ADD_BINOP(op) do { \
477 MONO_INST_NEW (cfg, ins, (op)); \
479 ins->sreg1 = sp [0]->dreg; \
480 ins->sreg2 = sp [1]->dreg; \
481 type_from_op (ins, sp [0], sp [1]); \
483 /* Have to insert a widening op */ \
484 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
485 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
486 MONO_ADD_INS ((cfg)->cbb, (ins)); \
487 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
490 #define ADD_UNOP(op) do { \
491 MONO_INST_NEW (cfg, ins, (op)); \
493 ins->sreg1 = sp [0]->dreg; \
494 type_from_op (ins, sp [0], NULL); \
496 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
497 MONO_ADD_INS ((cfg)->cbb, (ins)); \
498 *sp++ = mono_decompose_opcode (cfg, ins); \
501 #define ADD_BINCOND(next_block) do { \
504 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
505 cmp->sreg1 = sp [0]->dreg; \
506 cmp->sreg2 = sp [1]->dreg; \
507 type_from_op (cmp, sp [0], sp [1]); \
509 type_from_op (ins, sp [0], sp [1]); \
510 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
511 GET_BBLOCK (cfg, tblock, target); \
512 link_bblock (cfg, bblock, tblock); \
513 ins->inst_true_bb = tblock; \
514 if ((next_block)) { \
515 link_bblock (cfg, bblock, (next_block)); \
516 ins->inst_false_bb = (next_block); \
517 start_new_bblock = 1; \
519 GET_BBLOCK (cfg, tblock, ip); \
520 link_bblock (cfg, bblock, tblock); \
521 ins->inst_false_bb = tblock; \
522 start_new_bblock = 2; \
524 if (sp != stack_start) { \
525 handle_stack_args (cfg, stack_start, sp - stack_start); \
526 CHECK_UNVERIFIABLE (cfg); \
528 MONO_ADD_INS (bblock, cmp); \
529 MONO_ADD_INS (bblock, ins); \
533 * link_bblock: Links two basic blocks
535 * links two basic blocks in the control flow graph, the 'from'
536 * argument is the starting block and the 'to' argument is the block
537 * the control flow ends to after 'from'.
540 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 MonoBasicBlock **newa;
546 if (from->cil_code) {
548 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
550 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
553 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
555 printf ("edge from entry to exit\n");
560 for (i = 0; i < from->out_count; ++i) {
561 if (to == from->out_bb [i]) {
567 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
568 for (i = 0; i < from->out_count; ++i) {
569 newa [i] = from->out_bb [i];
577 for (i = 0; i < to->in_count; ++i) {
578 if (from == to->in_bb [i]) {
584 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
585 for (i = 0; i < to->in_count; ++i) {
586 newa [i] = to->in_bb [i];
595 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
597 link_bblock (cfg, from, to);
601 * mono_find_block_region:
603 * We mark each basic block with a region ID. We use that to avoid BB
604 * optimizations when blocks are in different regions.
607 * A region token that encodes where this region is, and information
608 * about the clause owner for this block.
610 * The region encodes the try/catch/filter clause that owns this block
611 * as well as the type. -1 is a special value that represents a block
612 * that is in none of try/catch/filter.
615 mono_find_block_region (MonoCompile *cfg, int offset)
617 MonoMethodHeader *header = cfg->header;
618 MonoExceptionClause *clause;
621 for (i = 0; i < header->num_clauses; ++i) {
622 clause = &header->clauses [i];
623 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
624 (offset < (clause->handler_offset)))
625 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
627 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
628 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
629 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
630 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
631 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
633 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
636 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
637 return ((i + 1) << 8) | clause->flags;
644 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
646 MonoMethodHeader *header = cfg->header;
647 MonoExceptionClause *clause;
651 for (i = 0; i < header->num_clauses; ++i) {
652 clause = &header->clauses [i];
653 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
654 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
655 if (clause->flags == type)
656 res = g_list_append (res, clause);
663 mono_create_spvar_for_region (MonoCompile *cfg, int region)
667 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
671 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
672 /* prevent it from being register allocated */
673 var->flags |= MONO_INST_VOLATILE;
675 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
679 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
681 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
685 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
689 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
693 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
694 /* prevent it from being register allocated */
695 var->flags |= MONO_INST_VOLATILE;
697 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
703 * Returns the type used in the eval stack when @type is loaded.
704 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
707 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
711 type = mini_replace_type (type);
712 inst->klass = klass = mono_class_from_mono_type (type);
714 inst->type = STACK_MP;
719 switch (type->type) {
721 inst->type = STACK_INV;
725 case MONO_TYPE_BOOLEAN:
731 inst->type = STACK_I4;
736 case MONO_TYPE_FNPTR:
737 inst->type = STACK_PTR;
739 case MONO_TYPE_CLASS:
740 case MONO_TYPE_STRING:
741 case MONO_TYPE_OBJECT:
742 case MONO_TYPE_SZARRAY:
743 case MONO_TYPE_ARRAY:
744 inst->type = STACK_OBJ;
748 inst->type = STACK_I8;
752 inst->type = STACK_R8;
754 case MONO_TYPE_VALUETYPE:
755 if (type->data.klass->enumtype) {
756 type = mono_class_enum_basetype (type->data.klass);
760 inst->type = STACK_VTYPE;
763 case MONO_TYPE_TYPEDBYREF:
764 inst->klass = mono_defaults.typed_reference_class;
765 inst->type = STACK_VTYPE;
767 case MONO_TYPE_GENERICINST:
768 type = &type->data.generic_class->container_class->byval_arg;
772 g_assert (cfg->generic_sharing_context);
773 if (mini_is_gsharedvt_type (cfg, type)) {
774 g_assert (cfg->gsharedvt);
775 inst->type = STACK_VTYPE;
777 inst->type = STACK_OBJ;
781 g_error ("unknown type 0x%02x in eval stack type", type->type);
786 * The following tables are used to quickly validate the IL code in type_from_op ().
789 bin_num_table [STACK_MAX] [STACK_MAX] = {
790 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
793 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
794 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
795 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
796 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
797 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
802 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
805 /* reduce the size of this table */
807 bin_int_table [STACK_MAX] [STACK_MAX] = {
808 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
809 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
813 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
814 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
815 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
819 bin_comp_table [STACK_MAX] [STACK_MAX] = {
820 /* Inv i L p F & O vt */
822 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
823 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
824 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
825 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
826 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
827 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
828 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
831 /* reduce the size of this table */
833 shift_table [STACK_MAX] [STACK_MAX] = {
834 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
839 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
845 * Tables to map from the non-specific opcode to the matching
846 * type-specific opcode.
848 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
850 binops_op_map [STACK_MAX] = {
851 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
854 /* handles from CEE_NEG to CEE_CONV_U8 */
856 unops_op_map [STACK_MAX] = {
857 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
860 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
862 ovfops_op_map [STACK_MAX] = {
863 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
866 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
868 ovf2ops_op_map [STACK_MAX] = {
869 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
872 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
874 ovf3ops_op_map [STACK_MAX] = {
875 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
878 /* handles from CEE_BEQ to CEE_BLT_UN */
880 beqops_op_map [STACK_MAX] = {
881 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
884 /* handles from CEE_CEQ to CEE_CLT_UN */
886 ceqops_op_map [STACK_MAX] = {
887 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
891 * Sets ins->type (the type on the eval stack) according to the
892 * type of the opcode and the arguments to it.
893 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
895 * FIXME: this function sets ins->type unconditionally in some cases, but
896 * it should set it to invalid for some types (a conv.x on an object)
899 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
901 switch (ins->opcode) {
908 /* FIXME: check unverifiable args for STACK_MP */
909 ins->type = bin_num_table [src1->type] [src2->type];
910 ins->opcode += binops_op_map [ins->type];
917 ins->type = bin_int_table [src1->type] [src2->type];
918 ins->opcode += binops_op_map [ins->type];
923 ins->type = shift_table [src1->type] [src2->type];
924 ins->opcode += binops_op_map [ins->type];
929 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
930 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
931 ins->opcode = OP_LCOMPARE;
932 else if (src1->type == STACK_R8)
933 ins->opcode = OP_FCOMPARE;
935 ins->opcode = OP_ICOMPARE;
937 case OP_ICOMPARE_IMM:
938 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
939 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
940 ins->opcode = OP_LCOMPARE_IMM;
952 ins->opcode += beqops_op_map [src1->type];
955 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
956 ins->opcode += ceqops_op_map [src1->type];
962 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
963 ins->opcode += ceqops_op_map [src1->type];
967 ins->type = neg_table [src1->type];
968 ins->opcode += unops_op_map [ins->type];
971 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
972 ins->type = src1->type;
974 ins->type = STACK_INV;
975 ins->opcode += unops_op_map [ins->type];
981 ins->type = STACK_I4;
982 ins->opcode += unops_op_map [src1->type];
985 ins->type = STACK_R8;
986 switch (src1->type) {
989 ins->opcode = OP_ICONV_TO_R_UN;
992 ins->opcode = OP_LCONV_TO_R_UN;
996 case CEE_CONV_OVF_I1:
997 case CEE_CONV_OVF_U1:
998 case CEE_CONV_OVF_I2:
999 case CEE_CONV_OVF_U2:
1000 case CEE_CONV_OVF_I4:
1001 case CEE_CONV_OVF_U4:
1002 ins->type = STACK_I4;
1003 ins->opcode += ovf3ops_op_map [src1->type];
1005 case CEE_CONV_OVF_I_UN:
1006 case CEE_CONV_OVF_U_UN:
1007 ins->type = STACK_PTR;
1008 ins->opcode += ovf2ops_op_map [src1->type];
1010 case CEE_CONV_OVF_I1_UN:
1011 case CEE_CONV_OVF_I2_UN:
1012 case CEE_CONV_OVF_I4_UN:
1013 case CEE_CONV_OVF_U1_UN:
1014 case CEE_CONV_OVF_U2_UN:
1015 case CEE_CONV_OVF_U4_UN:
1016 ins->type = STACK_I4;
1017 ins->opcode += ovf2ops_op_map [src1->type];
1020 ins->type = STACK_PTR;
1021 switch (src1->type) {
1023 ins->opcode = OP_ICONV_TO_U;
1027 #if SIZEOF_VOID_P == 8
1028 ins->opcode = OP_LCONV_TO_U;
1030 ins->opcode = OP_MOVE;
1034 ins->opcode = OP_LCONV_TO_U;
1037 ins->opcode = OP_FCONV_TO_U;
1043 ins->type = STACK_I8;
1044 ins->opcode += unops_op_map [src1->type];
1046 case CEE_CONV_OVF_I8:
1047 case CEE_CONV_OVF_U8:
1048 ins->type = STACK_I8;
1049 ins->opcode += ovf3ops_op_map [src1->type];
1051 case CEE_CONV_OVF_U8_UN:
1052 case CEE_CONV_OVF_I8_UN:
1053 ins->type = STACK_I8;
1054 ins->opcode += ovf2ops_op_map [src1->type];
1058 ins->type = STACK_R8;
1059 ins->opcode += unops_op_map [src1->type];
1062 ins->type = STACK_R8;
1066 ins->type = STACK_I4;
1067 ins->opcode += ovfops_op_map [src1->type];
1070 case CEE_CONV_OVF_I:
1071 case CEE_CONV_OVF_U:
1072 ins->type = STACK_PTR;
1073 ins->opcode += ovfops_op_map [src1->type];
1076 case CEE_ADD_OVF_UN:
1078 case CEE_MUL_OVF_UN:
1080 case CEE_SUB_OVF_UN:
1081 ins->type = bin_num_table [src1->type] [src2->type];
1082 ins->opcode += ovfops_op_map [src1->type];
1083 if (ins->type == STACK_R8)
1084 ins->type = STACK_INV;
1086 case OP_LOAD_MEMBASE:
1087 ins->type = STACK_PTR;
1089 case OP_LOADI1_MEMBASE:
1090 case OP_LOADU1_MEMBASE:
1091 case OP_LOADI2_MEMBASE:
1092 case OP_LOADU2_MEMBASE:
1093 case OP_LOADI4_MEMBASE:
1094 case OP_LOADU4_MEMBASE:
1095 ins->type = STACK_PTR;
1097 case OP_LOADI8_MEMBASE:
1098 ins->type = STACK_I8;
1100 case OP_LOADR4_MEMBASE:
1101 case OP_LOADR8_MEMBASE:
1102 ins->type = STACK_R8;
1105 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1109 if (ins->type == STACK_MP)
1110 ins->klass = mono_defaults.object_class;
1115 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1121 param_table [STACK_MAX] [STACK_MAX] = {
1126 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1130 switch (args->type) {
1140 for (i = 0; i < sig->param_count; ++i) {
1141 switch (args [i].type) {
1145 if (!sig->params [i]->byref)
1149 if (sig->params [i]->byref)
1151 switch (sig->params [i]->type) {
1152 case MONO_TYPE_CLASS:
1153 case MONO_TYPE_STRING:
1154 case MONO_TYPE_OBJECT:
1155 case MONO_TYPE_SZARRAY:
1156 case MONO_TYPE_ARRAY:
1163 if (sig->params [i]->byref)
1165 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1174 /*if (!param_table [args [i].type] [sig->params [i]->type])
1182 * When we need a pointer to the current domain many times in a method, we
1183 * call mono_domain_get() once and we store the result in a local variable.
1184 * This function returns the variable that represents the MonoDomain*.
1186 inline static MonoInst *
1187 mono_get_domainvar (MonoCompile *cfg)
1189 if (!cfg->domainvar)
1190 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1191 return cfg->domainvar;
1195 * The got_var contains the address of the Global Offset Table when AOT
1199 mono_get_got_var (MonoCompile *cfg)
1201 #ifdef MONO_ARCH_NEED_GOT_VAR
1202 if (!cfg->compile_aot)
1204 if (!cfg->got_var) {
1205 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1207 return cfg->got_var;
1214 mono_get_vtable_var (MonoCompile *cfg)
1216 g_assert (cfg->generic_sharing_context);
1218 if (!cfg->rgctx_var) {
1219 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1220 /* force the var to be stack allocated */
1221 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1224 return cfg->rgctx_var;
1228 type_from_stack_type (MonoInst *ins) {
1229 switch (ins->type) {
1230 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1231 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1232 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1233 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1235 return &ins->klass->this_arg;
1236 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1237 case STACK_VTYPE: return &ins->klass->byval_arg;
1239 g_error ("stack type %d to monotype not handled\n", ins->type);
1244 static G_GNUC_UNUSED int
1245 type_to_stack_type (MonoType *t)
1247 t = mono_type_get_underlying_type (t);
1251 case MONO_TYPE_BOOLEAN:
1254 case MONO_TYPE_CHAR:
1261 case MONO_TYPE_FNPTR:
1263 case MONO_TYPE_CLASS:
1264 case MONO_TYPE_STRING:
1265 case MONO_TYPE_OBJECT:
1266 case MONO_TYPE_SZARRAY:
1267 case MONO_TYPE_ARRAY:
1275 case MONO_TYPE_VALUETYPE:
1276 case MONO_TYPE_TYPEDBYREF:
1278 case MONO_TYPE_GENERICINST:
1279 if (mono_type_generic_inst_is_valuetype (t))
1285 g_assert_not_reached ();
1292 array_access_to_klass (int opcode)
1296 return mono_defaults.byte_class;
1298 return mono_defaults.uint16_class;
1301 return mono_defaults.int_class;
1304 return mono_defaults.sbyte_class;
1307 return mono_defaults.int16_class;
1310 return mono_defaults.int32_class;
1312 return mono_defaults.uint32_class;
1315 return mono_defaults.int64_class;
1318 return mono_defaults.single_class;
1321 return mono_defaults.double_class;
1322 case CEE_LDELEM_REF:
1323 case CEE_STELEM_REF:
1324 return mono_defaults.object_class;
1326 g_assert_not_reached ();
1332 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable used to carry the value in stack slot 'slot'
 *   with the stack type of 'ins' across basic-block boundaries.  Variables
 *   of the same slot/type are cached in cfg->intvars and reused; otherwise
 *   a fresh OP_LOCAL is created.
 */
1335 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1340 /* inlining can result in deeper stacks */
1341 if (slot >= cfg->header->max_stack)
1342 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* one cache entry per (stack type, slot) pair */
1344 pos = ins->type - 1 + slot * STACK_MAX;
1346 switch (ins->type) {
/* cached: reuse the previously created variable */
1353 if ((vnum = cfg->intvars [pos]))
1354 return cfg->varinfo [vnum];
1355 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1356 cfg->intvars [pos] = res->inst_c0;
/* non-cacheable stack types always get a fresh variable */
1359 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   When compiling AOT, record (image, token) for 'key' in
 *   cfg->token_info_hash so the AOT compiler can later resolve the item
 *   from metadata alone.
 */
1365 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1368 * Don't use this if a generic_context is set, since that means AOT can't
1369 * look up the method using just the image+token.
1370 * table == 0 means this is a reference made from a wrapper.
1372 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1373 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1374 jump_info_token->image = image;
1375 jump_info_token->token = token;
1376 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1381 * This function is called to handle items that are left on the evaluation stack
1382 * at basic block boundaries. What happens is that we save the values to local variables
1383 * and we reload them later when first entering the target basic block (with the
1384 * handle_loaded_temps () function).
1385 * A single join point will use the same variables (stored in the array bb->out_stack or
1386 * bb->in_stack, if the basic block is before or after the join point).
1388 * This function needs to be called _before_ emitting the last instruction of
1389 * the bb (i.e. before emitting a branch).
1390 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *   Spill the 'count' values in sp[] (the evaluation stack) into shared
 *   local variables at a basic-block boundary, so the successor blocks can
 *   reload them.  Sets cfg->unverifiable when successor stack depths
 *   disagree.  NOTE(review): several lines (continues, closing braces,
 *   else arms) are missing from this extract.
 */
1393 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1396 MonoBasicBlock *bb = cfg->cbb;
1397 MonoBasicBlock *outb;
1398 MonoInst *inst, **locals;
1403 if (cfg->verbose_level > 3)
1404 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* first time we leave this bblock with values on the stack: pick the vars */
1405 if (!bb->out_scount) {
1406 bb->out_scount = count;
1407 //printf ("bblock %d has out:", bb->block_num);
/* if a successor already has an in_stack, adopt it as our out_stack */
1409 for (i = 0; i < bb->out_count; ++i) {
1410 outb = bb->out_bb [i];
1411 /* exception handlers are linked, but they should not be considered for stack args */
1412 if (outb->flags & BB_EXCEPTION_HANDLER)
1414 //printf (" %d", outb->block_num);
1415 if (outb->in_stack) {
1417 bb->out_stack = outb->in_stack;
/* no successor had one: allocate a fresh out_stack array */
1423 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1424 for (i = 0; i < count; ++i) {
1426 * try to reuse temps already allocated for this purpose, if they occupy the same
1427 * stack slot and if they are of the same type.
1428 * This won't cause conflicts since if 'local' is used to
1429 * store one of the values in the in_stack of a bblock, then
1430 * the same variable will be used for the same outgoing stack
1432 * This doesn't work when inlining methods, since the bblocks
1433 * in the inlined methods do not inherit their in_stack from
1434 * the bblock they are inlined to. See bug #58863 for an
1437 if (cfg->inlined_method)
1438 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1440 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* propagate our out_stack into successors that don't have an in_stack yet */
1445 for (i = 0; i < bb->out_count; ++i) {
1446 outb = bb->out_bb [i];
1447 /* exception handlers are linked, but they should not be considered for stack args */
1448 if (outb->flags & BB_EXCEPTION_HANDLER)
1450 if (outb->in_scount) {
/* stack-depth mismatch at a join point: code is unverifiable */
1451 if (outb->in_scount != bb->out_scount) {
1452 cfg->unverifiable = TRUE;
1455 continue; /* check they are the same locals */
1457 outb->in_scount = count;
1458 outb->in_stack = bb->out_stack;
1461 locals = bb->out_stack;
/* emit the actual stores of the stack values into the shared temps */
1463 for (i = 0; i < count; ++i) {
1464 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1465 inst->cil_code = sp [i]->cil_code;
1466 sp [i] = locals [i];
1467 if (cfg->verbose_level > 3)
1468 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1472 * It is possible that the out bblocks already have in_stack assigned, and
1473 * the in_stacks differ. In this case, we will store to all the different
1480 /* Find a bblock which has a different in_stack */
1482 while (bindex < bb->out_count) {
1483 outb = bb->out_bb [bindex];
1484 /* exception handlers are linked, but they should not be considered for stack args */
1485 if (outb->flags & BB_EXCEPTION_HANDLER) {
1489 if (outb->in_stack != locals) {
/* store the same values into this successor's distinct temp set too */
1490 for (i = 0; i < count; ++i) {
1491 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1492 inst->cil_code = sp [i]->cil_code;
1493 sp [i] = locals [i];
1494 if (cfg->verbose_level > 3)
1495 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1497 locals = outb->in_stack;
1506 /* Emit code which loads interface_offsets [klass->interface_id]
1507 * The array is stored in memory before vtable.
/*
 * Under AOT the interface id is not known at compile time, so the
 * (negative) offset is materialized via an ADJUSTED_IID patch and added to
 * vtable_reg; the JIT path folds it into a constant load offset.
 */
1510 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1512 if (cfg->compile_aot) {
1513 int ioffset_reg = alloc_preg (cfg);
1514 int iid_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1517 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1518 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: load from vtable - (iid + 1) * sizeof(void*) directly */
1521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Set intf_bit_reg to a nonzero value iff the interface bitmap located at
 *   base_reg+offset has the bit for klass's interface id set.  Three code
 *   paths: compressed bitmap (icall), AOT (iid loaded via patch and tested
 *   with shift/mask IR), and JIT (constant byte offset and mask).
 */
1526 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1528 int ibitmap_reg = alloc_preg (cfg);
1529 #ifdef COMPRESSED_INTERFACE_BITMAP
/* compressed bitmap: delegate the membership test to an icall */
1531 MonoInst *res, *ins;
1532 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1533 MONO_ADD_INS (cfg->cbb, ins);
1535 if (cfg->compile_aot)
1536 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1538 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1539 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1540 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1542 int ibitmap_byte_reg = alloc_preg (cfg);
1544 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1546 if (cfg->compile_aot) {
/* AOT: iid only known at load time, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) in emitted IR */
1547 int iid_reg = alloc_preg (cfg);
1548 int shifted_iid_reg = alloc_preg (cfg);
1549 int ibitmap_byte_address_reg = alloc_preg (cfg);
1550 int masked_iid_reg = alloc_preg (cfg);
1551 int iid_one_bit_reg = alloc_preg (cfg);
1552 int iid_bit_reg = alloc_preg (cfg);
1553 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1556 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1558 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1559 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is a compile-time constant, fold index and mask directly */
1562 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1569 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1570 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: test against the MonoClass's interface_bitmap field. */
1573 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1575 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1579 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1580 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: test against the MonoVTable's interface_bitmap field. */
1583 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1585 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1589 * Emit code which checks whether the interface id of @klass is smaller than
1590 * the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 *   Compare max_iid_reg against klass's interface id.  On failure, branch
 *   to false_target if given, otherwise throw InvalidCastException.
 *   NOTE(review): the branch/exception selection presumably depends on
 *   false_target being non-NULL; the conditional lines are missing from
 *   this extract -- confirm against the full source.
 */
1593 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1594 MonoBasicBlock *false_target)
1596 if (cfg->compile_aot) {
/* AOT: interface id comes from an IID patch at load time */
1597 int iid_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1606 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1609 /* Same as above, but obtains max_iid from a vtable */
1611 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1612 MonoBasicBlock *false_target)
1614 int max_iid_reg = alloc_preg (cfg);
/* load MonoVTable.max_interface_id (16-bit) then delegate the check */
1616 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1617 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1620 /* Same as above, but obtains max_iid from a klass */
1622 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1623 MonoBasicBlock *false_target)
1625 int max_iid_reg = alloc_preg (cfg);
/* load MonoClass.max_interface_id (16-bit) then delegate the check */
1627 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1628 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an "is instance of klass" test for the class in klass_reg using
 *   the supertype table: depth check (when klass->idepth exceeds the
 *   default supertable size) followed by a comparison of
 *   supertypes[idepth-1] against klass.  Branches to true_target on match,
 *   false_target on a failed depth check.  klass_ins, when non-NULL,
 *   supplies the runtime klass to compare against (generic sharing).
 */
1632 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1634 int idepth_reg = alloc_preg (cfg);
1635 int stypes_reg = alloc_preg (cfg);
1636 int stype = alloc_preg (cfg);
/* make sure klass->supertypes/idepth are initialized */
1638 mono_class_setup_supertypes (klass);
1640 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1641 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1645 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* three ways to materialize the klass to compare against */
1648 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1649 } else if (cfg->compile_aot) {
1650 int const_reg = alloc_preg (cfg);
1651 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1652 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1656 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without an explicit klass instruction. */
1660 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1662 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast test on a vtable: max-iid guard, then the
 *   interface bitmap bit test.  Branches to true_target when the bit is
 *   set, otherwise throws InvalidCastException (branch vs. throw selection
 *   lines are partially missing from this extract).
 */
1666 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1668 int intf_reg = alloc_preg (cfg);
1670 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1671 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1676 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1680 * Variant of the above that takes a register to the class, not the vtable.
1683 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1685 int intf_bit_reg = alloc_preg (cfg);
/* same shape as mini_emit_iface_cast, using MonoClass-based helpers */
1687 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1688 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1693 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact-class equality check on klass_reg, throwing
 *   InvalidCastException on mismatch.  The comparand is klass_inst->dreg
 *   when provided (generic sharing), an AOT class constant under AOT,
 *   or the immediate klass pointer otherwise.
 */
1697 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1700 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1701 } else if (cfg->compile_aot) {
1702 int const_reg = alloc_preg (cfg);
1703 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1704 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1708 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check without a klass instruction. */
1712 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1714 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare klass_reg against klass (AOT constant or immediate) and branch
 *   to 'target' using the given conditional branch opcode.
 */
1718 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1720 if (cfg->compile_aot) {
1721 int const_reg = alloc_preg (cfg);
1722 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1723 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1731 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check for the class in klass_reg, throwing
 *   InvalidCastException on failure.  Arrays get a rank check plus an
 *   element-class check (recursing via mini_emit_castclass for nested
 *   arrays, with special-casing around System.Enum for object/enum element
 *   types); non-arrays use the supertype-table check.  NOTE(review): the
 *   surrounding if/else structure is partially missing from this extract.
 */
1734 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1737 int rank_reg = alloc_preg (cfg);
1738 int eclass_reg = alloc_preg (cfg);
1740 g_assert (!klass_inst);
/* array path: rank must match exactly */
1741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1743 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1744 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* element-class check, special-cased around System.Enum */
1746 if (klass->cast_class == mono_defaults.object_class) {
1747 int parent_reg = alloc_preg (cfg);
1748 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1749 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1750 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1751 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1752 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1753 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1754 } else if (klass->cast_class == mono_defaults.enum_class) {
1755 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1756 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1757 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1759 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1760 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1763 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1764 /* Check that the object is a vector too */
1765 int bounds_reg = alloc_preg (cfg);
1766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1768 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array path: supertype-table check, mirroring isninst_cast_inst */
1771 int idepth_reg = alloc_preg (cfg);
1772 int stypes_reg = alloc_preg (cfg);
1773 int stype = alloc_preg (cfg);
1775 mono_class_setup_supertypes (klass);
1777 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1778 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1780 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1783 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1784 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without a klass instruction. */
1789 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1791 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline stores that fill 'size' bytes at destreg+offset with 'val'
 *   (asserted to be 0).  Small aligned sizes use a single immediate store;
 *   otherwise a register holding val is stored in the widest chunks the
 *   alignment allows, narrowing to 4/2/1-byte tails.  NOTE(review): the
 *   loop/offset-advance lines are missing from this extract.
 */
1795 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1799 g_assert (val == 0);
/* single-store fast path for register-sized, sufficiently aligned fills */
1804 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1807 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1810 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1813 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1815 #if SIZEOF_REGISTER == 8
1817 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general path: materialize val in a register once, then store chunks */
1823 val_reg = alloc_preg (cfg);
1825 if (SIZEOF_REGISTER == 8)
1826 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1828 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned prefix: byte stores until alignment is reached */
1831 /* This could be optimized further if necessary */
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1840 #if !NO_UNALIGNED_ACCESS
1841 if (SIZEOF_REGISTER == 8) {
1843 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* 4/2/1-byte tails */
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit an inline, fully unrolled copy of 'size' bytes from
 *   srcreg+soffset to destreg+doffset, using the widest load/store pairs
 *   the alignment allows (8/4/2/1 bytes).  Size is bounded by an assert to
 *   avoid unbounded code expansion.  NOTE(review): the loop/offset-advance
 *   lines are missing from this extract.
 */
1873 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1880 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1881 g_assert (size < 10000);
/* unaligned prefix: byte-by-byte copies until alignment is reached */
1884 /* This could be optimized further if necessary */
1886 cur_reg = alloc_preg (cfg);
1887 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1895 #if !NO_UNALIGNED_ACCESS
1896 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets that allow unaligned access */
1898 cur_reg = alloc_preg (cfg);
1899 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1900 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4/2/1-byte tails */
1909 cur_reg = alloc_preg (cfg);
1910 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1911 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1917 cur_reg = alloc_preg (cfg);
1918 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1925 cur_reg = alloc_preg (cfg);
1926 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR that stores sreg1 into the TLS slot identified by tls_key.
 *   AOT uses OP_TLS_SET_REG with an offset constant resolved at load time;
 *   JIT uses OP_TLS_SET with the offset folded in at compile time.
 */
1935 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1939 if (cfg->compile_aot) {
1940 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1941 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1943 ins->sreg2 = c->dreg;
1944 MONO_ADD_INS (cfg->cbb, ins);
1946 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1948 ins->inst_offset = mini_get_tls_offset (tls_key);
1949 MONO_ADD_INS (cfg->cbb, ins);
1956 * Emit IR to push the current LMF onto the LMF stack.
/*
 * Two strategies: when the LMF lives directly in TLS, save the old TLS
 * value into lmf->previous_lmf and store the new LMF via emit_tls_set;
 * otherwise obtain lmf_addr (TLS intrinsic, inlined pthread_getspecific,
 * or the mono_get_lmf_addr icall), cache it in cfg->lmf_addr_var, and link
 * the new LMF into the chain.  NOTE(review): several branch/else lines are
 * missing from this extract.
 */
1959 emit_push_lmf (MonoCompile *cfg)
1962 * Emit IR to push the LMF:
1963 * lmf_addr = <lmf_addr from tls>
1964 * lmf->lmf_addr = lmf_addr
1965 * lmf->prev_lmf = *lmf_addr
1968 int lmf_reg, prev_lmf_reg;
1969 MonoInst *ins, *lmf_ins;
1974 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1975 /* Load current lmf */
1976 lmf_ins = mono_get_lmf_intrinsic (cfg);
1978 MONO_ADD_INS (cfg->cbb, lmf_ins);
1979 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1980 lmf_reg = ins->dreg;
1981 /* Save previous_lmf */
1982 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* publish the new LMF into TLS */
1984 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1987 * Store lmf_addr in a variable, so it can be allocated to a global register.
1989 if (!cfg->lmf_addr_var)
1990 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* lmf_addr via the jit_tls intrinsic: &jit_tls->lmf */
1993 ins = mono_get_jit_tls_intrinsic (cfg);
1995 int jit_tls_dreg = ins->dreg;
1997 MONO_ADD_INS (cfg->cbb, ins);
1998 lmf_reg = alloc_preg (cfg);
1999 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* fallback: icall */
2001 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2004 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2006 MONO_ADD_INS (cfg->cbb, lmf_ins);
2009 MonoInst *args [16], *jit_tls_ins, *ins;
2011 /* Inline mono_get_lmf_addr () */
2012 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2014 /* Load mono_jit_tls_id */
2015 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2016 /* call pthread_getspecific () */
2017 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2018 /* lmf_addr = &jit_tls->lmf */
2019 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2022 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2026 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2028 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2029 lmf_reg = ins->dreg;
/* link the new LMF at the head of the chain: save *lmf_addr, then
 * *lmf_addr = lmf */
2031 prev_lmf_reg = alloc_preg (cfg);
2032 /* Save previous_lmf */
2033 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2034 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
2036 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2043 * Emit IR to pop the current LMF from the LMF stack.
/*
 * Mirror of emit_push_lmf: restore the previous LMF either directly into
 * TLS (lmf-in-TLS mode) or by storing lmf->previous_lmf back through
 * lmf_addr.
 */
2046 emit_pop_lmf (MonoCompile *cfg)
2048 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2054 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2055 lmf_reg = ins->dreg;
2057 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2058 /* Load previous_lmf */
2059 prev_lmf_reg = alloc_preg (cfg);
2060 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* write it straight back into TLS */
2062 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2065 * Emit IR to pop the LMF:
2066 * *(lmf->lmf_addr) = lmf->prev_lmf
2068 /* This could be called before emit_push_lmf () */
2069 if (!cfg->lmf_addr_var)
2070 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2071 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2073 prev_lmf_reg = alloc_preg (cfg);
2074 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2075 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 *   Emit a profiler enter/leave icall (func) passing the current method,
 *   when ENTER_LEAVE profiling is enabled.  Skipped for inlined methods to
 *   avoid distorting profiling results.
 */
2080 emit_instrumentation_call (MonoCompile *cfg, void *func)
2082 MonoInst *iargs [1];
2085 * Avoid instrumenting inlined methods since it can
2086 * distort profiling results.
2088 if (cfg->method != cfg->current_method)
2091 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2092 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2093 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 *   Pick the call opcode family (CALL/VOIDCALL/LCALL/FCALL/VCALL) for a
 *   call returning 'type', then the REG/MEMBASE/plain variant from the
 *   calli/virt flags.  Enums and generic instances are unwrapped and
 *   re-dispatched.  NOTE(review): some case labels and 'goto handle_enum'
 *   style lines are missing from this extract.
 */
2098 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns behave like a native-int return */
2101 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2104 type = mini_get_basic_type_from_generic (gsctx, type);
2105 type = mini_replace_type (type);
2106 switch (type->type) {
2107 case MONO_TYPE_VOID:
2108 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2111 case MONO_TYPE_BOOLEAN:
2114 case MONO_TYPE_CHAR:
2117 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2121 case MONO_TYPE_FNPTR:
2122 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2123 case MONO_TYPE_CLASS:
2124 case MONO_TYPE_STRING:
2125 case MONO_TYPE_OBJECT:
2126 case MONO_TYPE_SZARRAY:
2127 case MONO_TYPE_ARRAY:
2128 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2131 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2134 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2135 case MONO_TYPE_VALUETYPE:
/* enums reduce to their underlying basetype */
2136 if (type->data.klass->enumtype) {
2137 type = mono_class_enum_basetype (type->data.klass);
2140 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2141 case MONO_TYPE_TYPEDBYREF:
2142 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2143 case MONO_TYPE_GENERICINST:
2144 type = &type->data.generic_class->container_class->byval_arg;
2147 case MONO_TYPE_MVAR:
/* gsharedvt: shared type vars are treated as vtypes */
2149 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2151 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2157 * target_type_is_incompatible:
2158 * @cfg: MonoCompile context
2160 * Check that the item @arg on the evaluation stack can be stored
2161 * in the target type (can be a local, or field, etc).
2162 * The cfg arg can be used to check if we need verification or just
2165 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 *   Return nonzero when the evaluation-stack item 'arg' cannot be stored
 *   into a location of type 'target' (local, field, argument...).
 *   NOTE(review): the 'return 1/return 0' lines after most comparisons are
 *   missing from this extract.
 */
2168 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2170 MonoType *simple_type;
2173 target = mini_replace_type (target);
2174 if (target->byref) {
2175 /* FIXME: check that the pointed to types match */
2176 if (arg->type == STACK_MP)
2177 return arg->klass != mono_class_from_mono_type (target);
2178 if (arg->type == STACK_PTR)
2183 simple_type = mono_type_get_underlying_type (target);
2184 switch (simple_type->type) {
2185 case MONO_TYPE_VOID:
2189 case MONO_TYPE_BOOLEAN:
2192 case MONO_TYPE_CHAR:
2195 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2199 /* STACK_MP is needed when setting pinned locals */
2200 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2205 case MONO_TYPE_FNPTR:
2207 * Some opcodes like ldloca return 'transient pointers' which can be stored
2208 * in native int. (#688008).
2210 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2213 case MONO_TYPE_CLASS:
2214 case MONO_TYPE_STRING:
2215 case MONO_TYPE_OBJECT:
2216 case MONO_TYPE_SZARRAY:
2217 case MONO_TYPE_ARRAY:
2218 if (arg->type != STACK_OBJ)
2220 /* FIXME: check type compatibility */
2224 if (arg->type != STACK_I8)
2229 if (arg->type != STACK_R8)
2232 case MONO_TYPE_VALUETYPE:
/* vtypes must match exactly by class */
2233 if (arg->type != STACK_VTYPE)
2235 klass = mono_class_from_mono_type (simple_type);
2236 if (klass != arg->klass)
2239 case MONO_TYPE_TYPEDBYREF:
2240 if (arg->type != STACK_VTYPE)
2242 klass = mono_class_from_mono_type (simple_type);
2243 if (klass != arg->klass)
2246 case MONO_TYPE_GENERICINST:
2247 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2248 if (arg->type != STACK_VTYPE)
2250 klass = mono_class_from_mono_type (simple_type);
2251 if (klass != arg->klass)
2255 if (arg->type != STACK_OBJ)
2257 /* FIXME: check type compatibility */
2261 case MONO_TYPE_MVAR:
/* type variables only reach here under generic sharing */
2262 g_assert (cfg->generic_sharing_context);
2263 if (mini_type_var_is_vt (cfg, simple_type)) {
2264 if (arg->type != STACK_VTYPE)
2267 if (arg->type != STACK_OBJ)
2272 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2278 * Prepare arguments for passing to a function call.
2279 * Return a non-zero value if the arguments can't be passed to the given
2281 * The type checks are not yet complete and some conversions may need
2282 * casts on 32 or 64 bit architectures.
2284 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 *   Return nonzero when the stack items in args[] cannot be passed to a
 *   call with signature 'sig' (per-parameter stack-type compatibility,
 *   plus the 'this' argument when sig->hasthis).  NOTE(review): most
 *   'return 1' lines and several case labels are missing from this
 *   extract.
 */
2287 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2289 MonoType *simple_type;
/* implicit 'this' must be an object, managed pointer or native pointer */
2293 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2297 for (i = 0; i < sig->param_count; ++i) {
2298 if (sig->params [i]->byref) {
2299 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2303 simple_type = sig->params [i];
2304 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2306 switch (simple_type->type) {
2307 case MONO_TYPE_VOID:
2312 case MONO_TYPE_BOOLEAN:
2315 case MONO_TYPE_CHAR:
2318 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2324 case MONO_TYPE_FNPTR:
2325 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2328 case MONO_TYPE_CLASS:
2329 case MONO_TYPE_STRING:
2330 case MONO_TYPE_OBJECT:
2331 case MONO_TYPE_SZARRAY:
2332 case MONO_TYPE_ARRAY:
2333 if (args [i]->type != STACK_OBJ)
2338 if (args [i]->type != STACK_I8)
2343 if (args [i]->type != STACK_R8)
2346 case MONO_TYPE_VALUETYPE:
/* enums reduce to their underlying basetype before re-dispatch */
2347 if (simple_type->data.klass->enumtype) {
2348 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2351 if (args [i]->type != STACK_VTYPE)
2354 case MONO_TYPE_TYPEDBYREF:
2355 if (args [i]->type != STACK_VTYPE)
2358 case MONO_TYPE_GENERICINST:
2359 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2362 case MONO_TYPE_MVAR:
2364 if (args [i]->type != STACK_VTYPE)
2368 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALL_MEMBASE (virtual call) opcode to its direct-call
 *   equivalent.  NOTE(review): the return statements after each case label
 *   are missing from this extract.
 */
2376 callvirt_to_call (int opcode)
2379 case OP_CALL_MEMBASE:
2381 case OP_VOIDCALL_MEMBASE:
2383 case OP_FCALL_MEMBASE:
2385 case OP_VCALL_MEMBASE:
2387 case OP_LCALL_MEMBASE:
2390 g_assert_not_reached ();
2396 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (either the given imt_arg instruction or
 *   a method constant) into a register and attach it to 'call'.  LLVM uses
 *   call->imt_arg_reg; otherwise the register is bound to
 *   MONO_ARCH_IMT_REG when available, else handed to the arch backend.
 */
2398 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2402 if (COMPILE_LLVM (cfg)) {
2403 method_reg = alloc_preg (cfg);
2406 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2407 } else if (cfg->compile_aot) {
2408 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2411 MONO_INST_NEW (cfg, ins, OP_PCONST);
2412 ins->inst_p0 = method;
2413 ins->dreg = method_reg;
2414 MONO_ADD_INS (cfg->cbb, ins);
2418 call->imt_arg_reg = method_reg;
2420 #ifdef MONO_ARCH_IMT_REG
2421 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2423 /* Need this to keep the IMT arg alive */
2424 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* non-LLVM path: same materialization, different plumbing */
2429 #ifdef MONO_ARCH_IMT_REG
2430 method_reg = alloc_preg (cfg);
2433 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2434 } else if (cfg->compile_aot) {
2435 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2438 MONO_INST_NEW (cfg, ins, OP_PCONST);
2439 ins->inst_p0 = method;
2440 ins->dreg = method_reg;
2441 MONO_ADD_INS (cfg->cbb, ins);
2444 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* no dedicated IMT register: let the architecture backend handle it */
2446 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from 'mp' describing a patch of the given type
 *   and target.  NOTE(review): the lines initializing the remaining fields
 *   (e.g. ip/type) are missing from this extract.
 */
2450 static MonoJumpInfo *
2451 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2453 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2457 ji->data.target = target;
/* Context-used check for a class; only meaningful under generic sharing. */
2463 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2465 if (cfg->generic_sharing_context)
2466 return mono_class_check_context_used (klass);
/* Context-used check for a method; only meaningful under generic sharing. */
2472 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2474 if (cfg->generic_sharing_context)
2475 return mono_method_check_context_used (method);
2481 * check_method_sharing:
2483 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Outputs:
 *   *out_pass_vtable  - TRUE when the (static or valuetype) generic method
 *                       may be shared and needs its class vtable passed.
 *   *out_pass_mrgctx  - TRUE when the method has a method-level generic
 *                       context that may be shared (or is gsharedvt) and
 *                       needs an mrgctx passed.  The two are mutually
 *                       exclusive (asserted).
 */
2486 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2488 gboolean pass_vtable = FALSE;
2489 gboolean pass_mrgctx = FALSE;
2491 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2492 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2493 gboolean sharable = FALSE;
2495 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2498 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2499 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2500 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2502 sharable = sharing_enabled && context_sharable;
2506 * Pass vtable iff target method might
2507 * be shared, which means that sharing
2508 * is enabled for its class and its
2509 * context is sharable (and it's not a
2512 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2516 if (mini_method_get_context (cmethod) &&
2517 mini_method_get_context (cmethod)->method_inst) {
/* vtable and mrgctx are never passed together */
2518 g_assert (!pass_vtable);
2520 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2523 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2524 MonoGenericContext *context = mini_method_get_context (cmethod);
2525 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2527 if (sharing_enabled && context_sharable)
/* gsharedvt signatures always need the mrgctx */
2529 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2534 if (out_pass_vtable)
2535 *out_pass_vtable = pass_vtable;
2536 if (out_pass_mrgctx)
2537 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * pick the call opcode (tailcall / calli / virtual), set up the return value
 * (vret var for value types, a fresh dreg otherwise), pre-convert r4 args
 * under soft-float, and hand the call to the backend (LLVM or native arch)
 * for argument marshalling. Returns the call instruction; callers add it to
 * the current bblock themselves.
 */
2540 inline static MonoCallInst *
2541 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2542 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2546 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the leave-instrumentation first. */
2551 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2553 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2555 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2558 call->signature = sig;
2559 call->rgctx_reg = rgctx;
2560 sig_ret = mini_replace_type (sig->ret);
2562 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Value-type return: either reuse the method's vret_addr ... */
2565 if (mini_type_is_vtype (cfg, sig_ret)) {
2566 call->vret_var = cfg->vret_addr;
2567 //g_assert_not_reached ();
/* ... or allocate a temporary and reference it via OP_OUTARG_VTRETADDR. */
2569 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2570 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2573 temp->backend.is_pinvoke = sig->pinvoke;
2576 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2577 * address of return value to increase optimization opportunities.
2578 * Before vtype decomposition, the dreg of the call ins itself represents the
2579 * fact the call modifies the return value. After decomposition, the call will
2580 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2581 * will be transformed into an LDADDR.
2583 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2584 loada->dreg = alloc_preg (cfg);
2585 loada->inst_p0 = temp;
2586 /* We reference the call too since call->dreg could change during optimization */
2587 loada->inst_p1 = call;
2588 MONO_ADD_INS (cfg->cbb, loada);
2590 call->inst.dreg = temp->dreg;
2592 call->vret_var = loada;
2594 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2594 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2596 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2597 if (COMPILE_SOFT_FLOAT (cfg)) {
2599 * If the call has a float argument, we would need to do an r8->r4 conversion using
2600 * an icall, but that cannot be done during the call sequence since it would clobber
2601 * the call registers + the stack. So we do it before emitting the call.
2603 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2605 MonoInst *in = call->args [i];
2607 if (i >= sig->hasthis)
2608 t = sig->params [i - sig->hasthis];
2610 t = &mono_defaults.int_class->byval_arg;
2611 t = mono_type_get_underlying_type (t);
2613 if (!t->byref && t->type == MONO_TYPE_R4) {
2614 MonoInst *iargs [1];
2618 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2620 /* The result will be in an int vreg */
2621 call->args [i] = conv;
2627 call->need_unbox_trampoline = unbox_trampoline;
2630 if (COMPILE_LLVM (cfg))
2631 mono_llvm_emit_call (cfg, call);
2633 mono_arch_emit_call (cfg, call);
2635 mono_arch_emit_call (cfg, call);
/* Track the worst-case outgoing-argument area needed by any call. */
2638 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2639 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument (in RGCTX_REG) to CALL: bound to the dedicated
 * MONO_ARCH_RGCTX_REG where the target defines one, otherwise recorded in
 * call->rgctx_arg_reg for the backend to handle.
 */
2645 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2647 #ifdef MONO_ARCH_RGCTX_REG
2648 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2649 cfg->uses_rgctx_reg = TRUE;
2650 call->rgctx_reg = TRUE;
2652 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG. Optionally passes
 * an IMT argument and an rgctx argument. For pinvoke wrappers with
 * cfg->check_pinvoke_callconv set, brackets the call with OP_GET_SP/OP_SET_SP
 * so a callee that unbalances the stack (wrong calling convention) is
 * detected and reported as ExecutionEngineException instead of crashing.
 */
2659 inline static MonoInst*
2660 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2665 gboolean check_sp = FALSE;
2667 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2668 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2670 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into its own vreg before the out-arg setup. */
2675 rgctx_reg = mono_alloc_preg (cfg);
2676 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2680 if (!cfg->stack_inbalance_var)
2681 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record SP before the call for the post-call balance check. */
2683 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2684 ins->dreg = cfg->stack_inbalance_var->dreg;
2685 MONO_ADD_INS (cfg->cbb, ins);
2688 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2690 call->inst.sreg1 = addr->dreg;
2693 emit_imt_argument (cfg, call, NULL, imt_arg);
2695 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2700 sp_reg = mono_alloc_preg (cfg);
2702 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2704 MONO_ADD_INS (cfg->cbb, ins);
2706 /* Restore the stack so we don't crash when throwing the exception */
2707 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2708 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2709 MONO_ADD_INS (cfg->cbb, ins);
2711 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2712 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2716 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2718 return (MonoInst*)call;
2722 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2725 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2727 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS. Handles:
 * string ctors (rewriting the signature to return string), transparent-proxy
 * remoting wrappers, delegate Invoke fast paths through delegate->invoke_impl,
 * devirtualization of non-virtual/final methods, vtable calls, and
 * interface/generic-virtual dispatch through the IMT. THIS non-NULL selects
 * virtual dispatch; IMT_ARG and RGCTX_ARG are forwarded to the call when
 * given. Returns the call instruction as a MonoInst*.
 */
2730 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2731 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2733 #ifndef DISABLE_REMOTING
2734 gboolean might_be_remote = FALSE;
2736 gboolean virtual = this != NULL;
2737 gboolean enable_for_aot = TRUE;
2741 gboolean need_unbox_trampoline;
2744 sig = mono_method_signature (method);
2747 rgctx_reg = mono_alloc_preg (cfg);
2748 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2751 if (method->string_ctor) {
2752 /* Create the real signature */
2753 /* FIXME: Cache these */
2754 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2755 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2760 context_used = mini_method_check_context_used (cfg, method);
2762 #ifndef DISABLE_REMOTING
/* A call on a MarshalByRef (or object) 'this' may hit a transparent proxy,
 * in which case it must go through the remoting-invoke-with-check wrapper. */
2763 might_be_remote = this && sig->hasthis &&
2764 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2765 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2767 if (might_be_remote && context_used) {
2770 g_assert (cfg->generic_sharing_context);
2772 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2774 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2778 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2780 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2782 #ifndef DISABLE_REMOTING
2783 if (might_be_remote)
2784 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2787 call->method = method;
2788 call->inst.flags |= MONO_INST_HAS_METHOD;
2789 call->inst.inst_left = this;
2790 call->tail_call = tail;
2793 int vtable_reg, slot_reg, this_reg;
2796 this_reg = this->dreg;
/* Fast path: delegate Invoke goes straight through delegate->invoke_impl. */
2798 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2799 MonoInst *dummy_use;
2801 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2803 /* Make a call to delegate->invoke_impl */
2804 call->inst.inst_basereg = this_reg;
2805 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2806 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2808 /* We must emit a dummy use here because the delegate trampoline will
2809 replace the 'this' argument with the delegate target making this activation
2810 no longer a root for the delegate.
2811 This is an issue for delegates that target collectible code such as dynamic
2812 methods of GC'able assemblies.
2814 For a test case look into #667921.
2816 FIXME: a dummy use is not the best way to do it as the local register allocator
2817 will put it on a caller save register and spil it around the call.
2818 Ideally, we would either put it on a callee save register or only do the store part.
2820 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2822 return (MonoInst*)call;
/* Devirtualize: non-virtual or sealed-final methods get a direct call
 * after a null check on 'this'. */
2825 if ((!cfg->compile_aot || enable_for_aot) &&
2826 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2827 (MONO_METHOD_IS_FINAL (method) &&
2828 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2829 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2831 * the method is not virtual, we just need to ensure this is not null
2832 * and then we can call the method directly.
2834 #ifndef DISABLE_REMOTING
2835 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2837 * The check above ensures method is not gshared, this is needed since
2838 * gshared methods can't have wrappers.
2840 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2844 if (!method->string_ctor)
2845 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2847 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2848 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2850 * the method is virtual, but we can statically dispatch since either
2851 * it's class or the method itself are sealed.
2852 * But first we need to ensure it's not a null reference.
2854 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2856 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* Genuinely virtual: load the vtable (this also faults on null 'this'). */
2858 vtable_reg = alloc_preg (cfg);
2859 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2860 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface dispatch: IMT slots live at negative offsets from the vtable. */
2863 guint32 imt_slot = mono_method_get_imt_slot (method);
2864 emit_imt_argument (cfg, call, call->method, imt_arg);
2865 slot_reg = vtable_reg;
2866 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2868 if (slot_reg == -1) {
2869 slot_reg = alloc_preg (cfg);
2870 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2871 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Plain virtual dispatch through the vtable slot. */
2874 slot_reg = vtable_reg;
2875 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2876 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2878 g_assert (mono_method_signature (method)->generic_param_count);
2879 emit_imt_argument (cfg, call, call->method, imt_arg);
2883 call->inst.sreg1 = slot_reg;
2884 call->inst.inst_offset = offset;
2885 call->virtual = TRUE;
2889 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2892 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2894 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: non-tail call to METHOD using its own signature,
 * with no IMT or rgctx argument.
 */
2898 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2900 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct (non-virtual, non-calli) call to the native address FUNC
 * with signature SIG; adds the call to the current bblock and returns it.
 */
2904 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2911 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2914 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2916 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall identified by its address FUNC, going
 * through the icall's registered wrapper and signature.
 */
2920 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2922 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2926 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2930 * mono_emit_abs_call:
2932 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2934 inline static MonoInst*
2935 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2936 MonoMethodSignature *sig, MonoInst **args)
2938 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2942 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in cfg->abs_patches so the emitter can recognize the fake
 * address and resolve it to the real target. */
2945 if (cfg->abs_patches == NULL)
2946 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2947 g_hash_table_insert (cfg->abs_patches, ji, ji);
2948 ins = mono_emit_native_call (cfg, ji, sig, args);
2949 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the return type
 * is a sub-register-sized integer: pinvoke callees (and the LLVM backend)
 * may leave the upper bits of the return register uninitialized, so an
 * explicit sign/zero extension is emitted. Returns the (possibly new)
 * result instruction.
 */
2954 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2956 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2957 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2961 * Native code might return non register sized integers
2962 * without initializing the upper bits.
/* Map the load opcode of the return type to the matching widening conv. */
2964 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2965 case OP_LOADI1_MEMBASE:
2966 widen_op = OP_ICONV_TO_I1;
2968 case OP_LOADU1_MEMBASE:
2969 widen_op = OP_ICONV_TO_U1;
2971 case OP_LOADI2_MEMBASE:
2972 widen_op = OP_ICONV_TO_I2;
2974 case OP_LOADU2_MEMBASE:
2975 widen_op = OP_ICONV_TO_U2;
2981 if (widen_op != -1) {
2982 int dreg = alloc_preg (cfg);
2985 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2986 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and lazily cache) the managed String.memcpy(3) helper from
 * corlib; aborts if corlib is too old to provide it.
 */
2996 get_memcpy_method (void)
2998 static MonoMethod *memcpy_method = NULL;
2999 if (!memcpy_method) {
3000 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3002 g_error ("Old corlib found. Install a new one");
3004 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build into *WB_BITMAP a bitmask with one bit per pointer-sized slot of
 * KLASS (relative to OFFSET) for every field holding a GC reference,
 * recursing into embedded value types that themselves contain references.
 * Static fields are skipped. Used to drive the value-copy write barrier.
 */
3008 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3010 MonoClassField *field;
3011 gpointer iter = NULL;
3013 while ((field = mono_class_get_fields (klass, &iter))) {
3016 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetypes carry the MonoObject header in field->offset; subtract it. */
3018 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3019 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3020 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3021 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3023 MonoClass *field_class = mono_class_from_mono_type (field->type);
3024 if (field_class->has_references)
3025 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR. Three
 * strategies, fastest first: the arch-specific OP_CARD_TABLE_WBARRIER
 * opcode, inline card-table marking (shift, mask, store 1 into the card
 * byte), or a call to the generic managed write-barrier method. No-op when
 * cfg->gen_write_barriers is off.
 */
3031 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3033 int card_table_shift_bits;
3034 gpointer card_table_mask;
3036 MonoInst *dummy_use;
3037 int nursery_shift_bits;
3038 size_t nursery_size;
3039 gboolean has_card_table_wb = FALSE;
3041 if (!cfg->gen_write_barriers)
3044 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3046 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3048 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3049 has_card_table_wb = TRUE;
3052 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3055 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3056 wbarrier->sreg1 = ptr->dreg;
3057 wbarrier->sreg2 = value->dreg;
3058 MONO_ADD_INS (cfg->cbb, wbarrier);
3059 } else if (card_table) {
3060 int offset_reg = alloc_preg (cfg);
3061 int card_reg = alloc_preg (cfg);
/* card index = ptr >> shift, optionally masked. */
3064 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3065 if (card_table_mask)
3066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3068 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3069 * IMM's larger than 32bits.
3071 if (cfg->compile_aot) {
3072 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3074 MONO_INST_NEW (cfg, ins, OP_PCONST);
3075 ins->inst_p0 = card_table;
3076 ins->dreg = card_reg;
3077 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: *(card_table + index) = 1. */
3080 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3081 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
3083 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3084 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
3087 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a valuetype of KLASS
 * (SIZE bytes, alignment ALIGN) from iargs[1] to iargs[0]. Small copies are
 * unrolled as pointer-sized load/store pairs with a barrier after each
 * reference slot (per the bitmap from create_write_barrier_bitmap); larger
 * copies fall back to the mono_gc_wbarrier_value_copy_bitmap icall. Returns
 * early (bails to the caller's generic path) when alignment is below
 * pointer size or the size exceeds the 32-slot bitmap.
 */
3091 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3093 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3094 unsigned need_wb = 0;
3099 /*types with references can't have alignment smaller than sizeof(void*) */
3100 if (align < SIZEOF_VOID_P)
3103 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3104 if (size > 32 * SIZEOF_VOID_P)
3107 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3109 /* We don't unroll more than 5 stores to avoid code bloat. */
3110 if (size > 5 * SIZEOF_VOID_P) {
3111 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3112 size += (SIZEOF_VOID_P - 1);
3113 size &= ~(SIZEOF_VOID_P - 1);
3115 EMIT_NEW_ICONST (cfg, iargs [2], size);
3116 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3117 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3121 destreg = iargs [0]->dreg;
3122 srcreg = iargs [1]->dreg;
3125 dest_ptr_reg = alloc_preg (cfg);
3126 tmp_reg = alloc_preg (cfg);
/* Walk a moving destination pointer so the barrier sees the slot address. */
3129 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3131 while (size >= SIZEOF_VOID_P) {
3132 MonoInst *load_inst;
3133 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3134 load_inst->dreg = tmp_reg;
3135 load_inst->inst_basereg = srcreg;
3136 load_inst->inst_offset = offset;
3137 MONO_ADD_INS (cfg->cbb, load_inst);
3139 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3142 emit_write_barrier (cfg, iargs [0], load_inst);
3144 offset += SIZEOF_VOID_P;
3145 size -= SIZEOF_VOID_P;
3148 /*tmp += sizeof (void*)*/
3149 if (size >= SIZEOF_VOID_P) {
3150 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3151 MONO_ADD_INS (cfg->cbb, iargs [0]);
3155 /* Those cannot be references since size < sizeof (void*) */
3157 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3158 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3164 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3165 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3171 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3172 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3181 * Emit code to copy a valuetype of type @klass whose address is stored in
3182 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *
 *   Emit code to copy a valuetype of type KLASS from *SRC->dreg to
 * *DEST->dreg. Under gsharedvt the size and memcpy helper are fetched from
 * the rgctx. When write barriers are needed (the class has references and
 * the copy is not to the stack and not NATIVE), the copy goes through an
 * intrinsified barrier-aware path or the mono_value_copy icall; otherwise a
 * plain inline memcpy or a call to the managed memcpy helper is used.
 */
3185 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3187 MonoInst *iargs [4];
3188 int context_used, n;
3190 MonoMethod *memcpy_method;
3191 MonoInst *size_ins = NULL;
3192 MonoInst *memcpy_ins = NULL;
3196 * This check breaks with spilled vars... need to handle it during verification anyway.
3197 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size/memcpy are only known at runtime, load them from rgctx. */
3200 if (mini_is_gsharedvt_klass (cfg, klass)) {
3202 context_used = mini_class_check_context_used (cfg, klass);
3203 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3204 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3208 n = mono_class_native_size (klass, &align);
3210 n = mono_class_value_size (klass, &align);
3212 /* if native is true there should be no references in the struct */
3213 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3214 /* Avoid barriers when storing to the stack */
3215 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3216 (dest->opcode == OP_LDADDR))) {
3222 context_used = mini_class_check_context_used (cfg, klass);
3224 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3225 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3227 } else if (context_used) {
3228 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3230 if (cfg->compile_aot) {
3231 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3233 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3234 mono_class_compute_gc_descriptor (klass);
3239 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3241 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: small copies are fully inlined. */
3246 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3247 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3248 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3253 iargs [2] = size_ins;
3255 EMIT_NEW_ICONST (cfg, iargs [2], n);
3257 memcpy_method = get_memcpy_method ();
3259 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3261 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) the managed String.memset(3) helper from
 * corlib; aborts if corlib is too old to provide it.
 */
3266 get_memset_method (void)
3268 static MonoMethod *memset_method = NULL;
3269 if (!memset_method) {
3270 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3272 g_error ("Old corlib found. Install a new one");
3274 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of KLASS at DEST->dreg. Under
 * gsharedvt the size and a bzero helper come from the rgctx and the clear
 * goes through an indirect call; otherwise small types are zeroed inline
 * via mini_emit_memset and larger ones via the managed memset helper.
 */
3278 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3280 MonoInst *iargs [3];
3281 int n, context_used;
3283 MonoMethod *memset_method;
3284 MonoInst *size_ins = NULL;
3285 MonoInst *bzero_ins = NULL;
3286 static MonoMethod *bzero_method;
3288 /* FIXME: Optimize this for the case when dest is an LDADDR */
3290 mono_class_init (klass);
3291 if (mini_is_gsharedvt_klass (cfg, klass)) {
3292 context_used = mini_class_check_context_used (cfg, klass);
3293 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3294 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3296 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3297 g_assert (bzero_method);
3299 iargs [1] = size_ins;
3300 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3304 n = mono_class_value_size (klass, &align);
/* Small types: inline memset is cheaper than a call. */
3306 if (n <= sizeof (gpointer) * 5) {
3307 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3310 memset_method = get_memset_method ();
3312 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3313 EMIT_NEW_ICONST (cfg, iargs [2], n);
3314 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context for METHOD into a register.
 * The source depends on CONTEXT_USED and the method kind: the mrgctx
 * variable for methods with a method instantiation, the vtable variable for
 * static/valuetype methods (possibly loaded out of an mrgctx), or the
 * vtable reached through 'this' for ordinary instance methods. Only valid
 * under generic sharing (asserted).
 */
3319 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3321 MonoInst *this = NULL;
3323 g_assert (cfg->generic_sharing_context);
3325 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3326 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3327 !method->klass->valuetype)
3328 EMIT_NEW_ARGLOAD (cfg, this, 0);
3330 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3331 MonoInst *mrgctx_loc, *mrgctx_var;
3334 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3336 mrgctx_loc = mono_get_vtable_var (cfg);
3337 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3340 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3341 MonoInst *vtable_loc, *vtable_var;
3345 vtable_loc = mono_get_vtable_var (cfg);
3346 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* When the method has a method_inst, the variable holds an mrgctx; load
 * the class vtable out of it. */
3348 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3349 MonoInst *mrgctx_var = vtable_var;
3352 vtable_reg = alloc_preg (cfg);
3353 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3354 vtable_var->type = STACK_PTR;
/* Instance methods: the vtable comes straight from 'this'. */
3362 vtable_reg = alloc_preg (cfg);
3363 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate and fill an rgctx-entry patch descriptor from MP, embedding a
 * nested MonoJumpInfo describing the data (PATCH_TYPE/PATCH_DATA) whose
 * INFO_TYPE slot should be fetched from the (m)rgctx of METHOD.
 */
3368 static MonoJumpInfoRgctxEntry *
3369 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3371 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3372 res->method = method;
3373 res->in_mrgctx = in_mrgctx;
3374 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3375 res->data->type = patch_type;
3376 res->data->data.target = patch_data;
3377 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline, passing RGCTX as the
 * argument; ENTRY describes which slot to fetch and is resolved via an
 * abs-call patch.
 */
3382 static inline MonoInst*
3383 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3385 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR loading the RGCTX_TYPE property of KLASS from the runtime
 * generic context of the current method.
 */
3389 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3390 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3392 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3393 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3395 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR loading the RGCTX_TYPE property of signature SIG from the
 * runtime generic context of the current method.
 */
3399 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3400 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3402 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3403 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3405 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR loading the RGCTX_TYPE property of a gsharedvt call described by
 * (SIG, CMETHOD) from the runtime generic context of the current method.
 */
3409 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3410 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3412 MonoJumpInfoGSharedVtCall *call_info;
3413 MonoJumpInfoRgctxEntry *entry;
3416 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3417 call_info->sig = sig;
3418 call_info->method = cmethod;
3420 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3421 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3423 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR loading the gsharedvt info blob INFO of CMETHOD from the runtime
 * generic context of the current method.
 */
3428 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3429 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3431 MonoJumpInfoRgctxEntry *entry;
3434 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3435 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3437 return emit_rgctx_fetch (cfg, rgctx, entry);
3441 * emit_get_rgctx_method:
3443 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3444 * normal constants, else emit a load from the rgctx.
3447 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3448 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No shared context: the method identity is known at JIT time, emit a
 * constant directly instead of an rgctx fetch. */
3450 if (!context_used) {
3453 switch (rgctx_type) {
3454 case MONO_RGCTX_INFO_METHOD:
3455 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3457 case MONO_RGCTX_INFO_METHOD_RGCTX:
3458 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3461 g_assert_not_reached ();
3464 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3465 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3467 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR loading the RGCTX_TYPE property of FIELD from the runtime
 * generic context of the current method.
 */
3472 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3473 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3475 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3476 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3478 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt-info entry matching (DATA,
 * RGCTX_TYPE), registering a new entry if none exists. The entries array is
 * grown geometrically (doubling, starting at 16) from the cfg mempool.
 * MONO_RGCTX_INFO_LOCAL_OFFSET entries are never deduplicated.
 */
3482 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3484 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3485 MonoRuntimeGenericContextInfoTemplate *template;
3490 for (i = 0; i < info->num_entries; ++i) {
3491 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3493 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3497 if (info->num_entries == info->count_entries) {
3498 MonoRuntimeGenericContextInfoTemplate *new_entries;
3499 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3501 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3503 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3504 info->entries = new_entries;
3505 info->count_entries = new_count_entries;
3508 idx = info->num_entries;
3509 template = &info->entries [idx];
3510 template->info_type = rgctx_type;
3511 template->data = data;
3513 info->num_entries ++;
3519 * emit_get_gsharedvt_info:
3521 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3524 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3529 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3530 /* Load info->entries [idx] */
3531 dreg = alloc_preg (cfg);
3532 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: fetch the gsharedvt info slot keyed by the byval
 * type of KLASS.
 */
3538 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3540 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3544 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic-class-init trampoline for KLASS. The vtable
 * argument is fetched from the rgctx when the class is context-dependent,
 * otherwise embedded as a vtable constant. On targets with
 * MONO_ARCH_VTABLE_REG the argument is bound to that register.
 */
3547 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3549 MonoInst *vtable_arg;
3553 context_used = mini_class_check_context_used (cfg, klass);
3556 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3557 klass, MONO_RGCTX_INFO_VTABLE);
3559 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3563 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3566 if (COMPILE_LLVM (cfg))
3567 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3569 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3570 #ifdef MONO_ARCH_VTABLE_REG
3571 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3572 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a sequence point at IL offset IP for debugger support, but only
 * when sequence points are enabled and METHOD is the method actually being
 * compiled (not an inlinee). NONEMPTY_STACK marks points where the IL
 * evaluation stack is not empty.
 */
3579 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3583 if (cfg->gen_seq_points && cfg->method == method) {
3584 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3586 ins->flags |= MONO_INST_NONEMPTY_STACK;
3587 MONO_ADD_INS (cfg->cbb, ins);
/*
 * Record the source and destination classes of a cast in JIT TLS so a failing
 * cast can produce a detailed error message (--debug=casts). Emits nothing
 * unless the better_cast_details debug option is enabled. When @null_check is
 * TRUE the bookkeeping is skipped for null objects, and *out_bblock (if
 * non-NULL) receives the basic block that is current on return.
 */
3592 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3594 if (mini_get_debug_options ()->better_cast_details) {
3595 int vtable_reg = alloc_preg (cfg);
3596 int klass_reg = alloc_preg (cfg);
3597 MonoBasicBlock *is_null_bb = NULL;
3599 int to_klass_reg, context_used;
3602 NEW_BBLOCK (cfg, is_null_bb);
/* Skip the detail recording for null objects */
3604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3608 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* FIX: the message previously ended in "\n." which printed a stray dot */
3610 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
3614 MONO_ADD_INS (cfg->cbb, tls_get);
/* Load the object's class and stash it in MonoJitTlsData.class_cast_from */
3615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3618 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3620 context_used = mini_class_check_context_used (cfg, klass);
3622 MonoInst *class_ins;
/* Shared code: the target class has to come from the rgctx */
3624 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3625 to_klass_reg = class_ins->dreg;
3627 to_klass_reg = alloc_preg (cfg);
3628 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
/* Stash the target class in MonoJitTlsData.class_cast_to */
3630 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3633 MONO_START_BB (cfg, is_null_bb);
3635 *out_bblock = cfg->cbb;
/* Clear the cast-details bookkeeping recorded by save_cast_details (). */
3641 reset_cast_details (MonoCompile *cfg)
3643 /* Reset the variables holding the cast details */
3644 if (mini_get_debug_options ()->better_cast_details) {
3645 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3647 MONO_ADD_INS (cfg->cbb, tls_get);
3648 /* It is enough to reset the from field */
3649 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3654  * On return the caller must check @array_class for load errors
/*
 * Emit a runtime check that @obj is an array of exactly @array_class,
 * throwing ArrayTypeMismatchException otherwise (used for stelem checks).
 */
3657 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3659 int vtable_reg = alloc_preg (cfg);
3662 context_used = mini_class_check_context_used (cfg, array_class);
3664 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also serves as the null check on @obj */
3666 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared domains: compare classes, since vtables are per-domain */
3668 if (cfg->opt & MONO_OPT_SHARED) {
3669 int class_reg = alloc_preg (cfg);
3670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3671 if (cfg->compile_aot) {
3672 int klass_reg = alloc_preg (cfg);
3673 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: fetch the expected vtable from the rgctx */
3678 } else if (context_used) {
3679 MonoInst *vtable_ins;
3681 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3682 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3684 if (cfg->compile_aot) {
3688 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3690 vt_reg = alloc_preg (cfg);
3691 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3692 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3695 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3701 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3703 reset_cast_details (cfg);
3707  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3708  * generic code is generated.
3711 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3713 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3716 MonoInst *rgctx, *addr;
3718 /* FIXME: What if the class is shared? We might not
3719 have to get the address of the method from the
/* Shared case: indirect call through the code address from the rgctx */
3721 addr = emit_get_rgctx_method (cfg, context_used, method,
3722 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3724 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3726 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3728 gboolean pass_vtable, pass_mrgctx;
3729 MonoInst *rgctx_arg = NULL;
3731 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3732 g_assert (!pass_mrgctx);
/* Non-shared case: call Nullable<T>.Unbox directly, passing the vtable if required */
3735 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3738 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3741 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * Emit an unbox of the object in sp [0] to value type @klass: type-check it
 * (InvalidCastException on mismatch) and return the address of the value data,
 * i.e. the object pointer advanced past the MonoObject header.
 */
3746 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3750 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3751 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3752 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3753 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3755 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
3756 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3757 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3759 /* FIXME: generics */
3760 g_assert (klass->rank == 0);
/* The object must not be an array */
3763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3764 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3766 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3770 MonoInst *element_class;
3772 /* This assertion is from the unboxcast insn */
3773 g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx */
3775 element_class = emit_get_rgctx_klass (cfg, context_used,
3776 klass->element_class, MONO_RGCTX_INFO_KLASS);
3778 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3779 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3781 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3782 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3783 reset_cast_details (cfg);
/* Result: address of the unboxed value = obj + sizeof (MonoObject) */
3786 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3787 MONO_ADD_INS (cfg->cbb, add);
3788 add->type = STACK_MP;
/*
 * Unbox @obj to a gsharedvt type @klass, whose concrete kind is only known at
 * run time. Branches on the MONO_RGCTX_INFO_CLASS_BOX_TYPE code (1 = reference
 * type, 2 = nullable, otherwise plain vtype) and yields the address of the
 * value in addr_reg; *out_cbb receives the basic block current on return.
 */
3795 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3797 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3798 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3802 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3808 args [1] = klass_inst;
/* Type check the object against the runtime class */
3811 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3813 NEW_BBLOCK (cfg, is_ref_bb);
3814 NEW_BBLOCK (cfg, is_nullable_bb);
3815 NEW_BBLOCK (cfg, end_bb);
3816 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
/* box-type code 1 => reference type */
3817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3818 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* box-type code 2 => nullable */
3820 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3821 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3823 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3824 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype fallthrough: the value lives right after the object header */
3828 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3829 MONO_ADD_INS (cfg->cbb, addr);
3831 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3834 MONO_START_BB (cfg, is_ref_bb);
3836 /* Save the ref to a temporary */
3837 dreg = alloc_ireg (cfg);
3838 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3839 addr->dreg = addr_reg;
3840 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3841 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3844 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call Nullable<T>.Unbox through the rgctx-provided address */
3847 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3848 MonoInst *unbox_call;
3849 MonoMethodSignature *unbox_sig;
3852 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* The unbox method cannot be constructed at JIT time, so build its signature by hand */
3854 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3855 unbox_sig->ret = &klass->byval_arg;
3856 unbox_sig->param_count = 1;
3857 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3858 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3860 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3861 addr->dreg = addr_reg;
3864 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3867 MONO_START_BB (cfg, end_bb);
/* All paths left the value's address in addr_reg; load the value from it */
3870 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3872 *out_cbb = cfg->cbb;
3878  * Returns NULL and set the cfg exception on error.
/*
 * Emit the allocation of an object of @klass (@for_box when the allocation is
 * part of a box). Picks between the managed allocator, a specialized icall, or
 * the generic allocation function depending on sharing/AOT/domain options.
 */
3881 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3883 MonoInst *iargs [2];
3889 MonoInst *iargs [2];
3891 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* Shared code: fetch the class (shared domain) or vtable through the rgctx */
3893 if (cfg->opt & MONO_OPT_SHARED)
3894 rgctx_info = MONO_RGCTX_INFO_KLASS;
3896 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3897 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3899 if (cfg->opt & MONO_OPT_SHARED) {
3900 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3902 alloc_ftn = mono_object_new;
3905 alloc_ftn = mono_object_new_specific;
3908 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3909 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3911 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared-generics path */
3914 if (cfg->opt & MONO_OPT_SHARED) {
3915 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3916 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3918 alloc_ftn = mono_object_new;
3919 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3920 /* This happens often in argument checking code, eg. throw new FooException... */
3921 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3922 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3923 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3925 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3926 MonoMethod *managed_alloc = NULL;
/* A missing vtable means the class failed to load */
3930 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3931 cfg->exception_ptr = klass;
3935 #ifndef MONO_CROSS_COMPILE
3936 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3939 if (managed_alloc) {
3940 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3941 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3943 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as an extra argument */
3945 guint32 lw = vtable->klass->instance_size;
3946 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3947 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3948 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3951 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3955 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3959  * Returns NULL and set the cfg exception on error.
/*
 * Emit a box of the value @val of type @klass, returning the boxed object.
 * Handles Nullable<T> (calls Nullable<T>.Box) and gsharedvt types (runtime
 * branch on the class box-type code) as special cases.
 */
3962 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3964 MonoInst *alloc, *ins;
3966 *out_cbb = cfg->cbb;
3968 if (mono_class_is_nullable (klass)) {
3969 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3972 /* FIXME: What if the class is shared? We might not
3973 have to get the method address from the RGCTX. */
/* Shared case: indirect call through the rgctx-provided code address */
3974 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3975 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3976 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3978 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3980 gboolean pass_vtable, pass_mrgctx;
3981 MonoInst *rgctx_arg = NULL;
3983 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3984 g_assert (!pass_mrgctx);
3987 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3990 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3993 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* gsharedvt: concrete kind only known at run time */
3997 if (mini_is_gsharedvt_klass (cfg, klass)) {
3998 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3999 MonoInst *res, *is_ref, *src_var, *addr;
4002 dreg = alloc_ireg (cfg);
4004 NEW_BBLOCK (cfg, is_ref_bb);
4005 NEW_BBLOCK (cfg, is_nullable_bb);
4006 NEW_BBLOCK (cfg, end_bb);
4007 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
/* box-type code 1 => reference type */
4008 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4009 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* box-type code 2 => nullable */
4011 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4012 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype fallthrough: allocate an object and copy the value after the header */
4015 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4018 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4019 ins->opcode = OP_STOREV_MEMBASE;
4021 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4022 res->type = STACK_OBJ;
4024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4027 MONO_START_BB (cfg, is_ref_bb);
4028 addr_reg = alloc_ireg (cfg);
4030 /* val is a vtype, so has to load the value manually */
4031 src_var = get_vreg_to_inst (cfg, val->dreg);
4033 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4034 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4035 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4036 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4039 MONO_START_BB (cfg, is_nullable_bb);
4042 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4043 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4045 MonoMethodSignature *box_sig;
4048 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4049 * construct that method at JIT time, so have to do things by hand.
4051 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4052 box_sig->ret = &mono_defaults.object_class->byval_arg;
4053 box_sig->param_count = 1;
4054 box_sig->params [0] = &klass->byval_arg;
4055 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4056 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4057 res->type = STACK_OBJ;
4061 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4063 MONO_START_BB (cfg, end_bb);
4065 *out_cbb = cfg->cbb;
/* Ordinary vtype box: allocate and copy the value after the object header */
4069 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4073 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * Return whether @klass has a variant (co- or contra-variant) generic
 * parameter that is instantiated with (or, for open generic code, could be)
 * a reference type. Such casts need the slow cache-based cast helpers.
 */
4080 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4083 MonoGenericContainer *container;
4084 MonoGenericInst *ginst;
4086 if (klass->generic_class) {
4087 container = klass->generic_class->container_class->generic_container;
4088 ginst = klass->generic_class->context.class_inst;
4089 } else if (klass->generic_container && context_used) {
4090 container = klass->generic_container;
4091 ginst = container->context.class_inst;
4096 for (i = 0; i < container->type_argc; ++i) {
/* Only variant parameters matter */
4098 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4100 type = ginst->type_argv [i];
4101 if (mini_type_is_reference (cfg, type))
/* TRUE when an isinst/castclass against @klass cannot be done with the simple
 * inline vtable checks (interfaces, arrays, nullables, MBR, sealed classes,
 * and open generic type variables). */
4107 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * Emit a castclass of args [0] through the castclass-with-cache marshal
 * wrapper; cast details are recorded around the call for --debug=casts.
 * *out_bblock receives the basic block current on return.
 */
4110 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4112 MonoMethod *mono_castclass;
4115 mono_castclass = mono_marshal_get_castclass_with_cache ();
4117 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4118 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4119 reset_cast_details (cfg);
4120 *out_bblock = cfg->cbb;
/*
 * Non-shared variant of the cache-based castclass: build the (obj, klass,
 * cache) argument triple and delegate to emit_castclass_with_cache ().
 */
4126 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4135 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4138 if (cfg->compile_aot) {
4139 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4140 cfg->castclass_cache_index ++;
4141 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4142 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
/* JIT: the cache slot is a fresh domain-allocated pointer */
4144 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4147 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4149 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4153  * Returns NULL and set the cfg exception on error.
/*
 * Emit the CIL castclass of @src to @klass: null passes through, a failed
 * check throws InvalidCastException. Slow cases go through the cache-based
 * or inlined-marshal-wrapper helpers; *out_bb receives the current bb.
 */
4156 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4158 MonoBasicBlock *is_null_bb;
4159 int obj_reg = src->dreg;
4160 int vtable_reg = alloc_preg (cfg);
4162 MonoInst *klass_inst = NULL, *res;
4163 MonoBasicBlock *bblock;
4167 context_used = mini_class_check_context_used (cfg, klass);
/* Non-shared variant-generic case: must use the cache-based helper */
4169 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4170 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4171 (*inline_costs) += 2;
/* MBR / interface: inline the castclass marshal wrapper */
4174 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4175 MonoMethod *mono_castclass;
4176 MonoInst *iargs [1];
4179 mono_castclass = mono_marshal_get_castclass (klass);
4182 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4183 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4184 iargs, ip, cfg->real_offset, TRUE, &bblock);
4185 reset_cast_details (cfg);
4186 CHECK_CFG_EXCEPTION;
4187 g_assert (costs > 0);
4189 cfg->real_offset += 5;
4191 (*inline_costs) += costs;
/* Shared code with a complex target type: cache-based helper with rgctx cache */
4200 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4201 MonoInst *cache_ins;
4203 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4208 /* klass - it's the second element of the cache entry*/
4209 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4212 args [2] = cache_ins;
4214 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4217 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline check: null objects always pass a castclass */
4220 NEW_BBLOCK (cfg, is_null_bb);
4222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4225 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4227 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4228 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4229 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4231 int klass_reg = alloc_preg (cfg);
4233 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single class comparison suffices */
4235 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4236 /* the remoting code is broken, access the class for now */
4237 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4238 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4240 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4241 cfg->exception_ptr = klass;
4244 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4246 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4247 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4249 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
4251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4252 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4256 MONO_START_BB (cfg, is_null_bb);
4258 reset_cast_details (cfg);
4269  * Returns NULL and set the cfg exception on error.
/*
 * Emit the CIL isinst of @src against @klass: result is the object itself on
 * a match (and for null input), or null on a mismatch. Complex target types
 * go through the isinst-with-cache marshal wrapper.
 */
4272 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4275 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4276 int obj_reg = src->dreg;
4277 int vtable_reg = alloc_preg (cfg);
4278 int res_reg = alloc_ireg_ref (cfg);
4279 MonoInst *klass_inst = NULL;
/* Complex targets: use the cache-based isinst wrapper */
4284 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4285 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4286 MonoInst *cache_ins;
4288 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4293 /* klass - it's the second element of the cache entry*/
4294 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4297 args [2] = cache_ins;
4299 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4302 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4305 NEW_BBLOCK (cfg, is_null_bb);
4306 NEW_BBLOCK (cfg, false_bb);
4307 NEW_BBLOCK (cfg, end_bb);
4309 /* Do the assignment at the beginning, so the other assignment can be if converted */
4310 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4311 ins->type = STACK_OBJ;
/* isinst of null yields null (via is_null_bb, which keeps res_reg == obj) */
4314 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4315 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4317 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4319 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4320 g_assert (!context_used);
4321 /* the is_null_bb target simply copies the input register to the output */
4322 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4324 int klass_reg = alloc_preg (cfg);
/* Array target: compare rank, then check the element (cast) class */
4327 int rank_reg = alloc_preg (cfg);
4328 int eclass_reg = alloc_preg (cfg);
4330 g_assert (!context_used);
4331 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4332 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4335 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special element-class cases (object, enum base, enum, interface) */
4336 if (klass->cast_class == mono_defaults.object_class) {
4337 int parent_reg = alloc_preg (cfg);
4338 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4339 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4340 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4342 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4343 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4344 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4345 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4346 } else if (klass->cast_class == mono_defaults.enum_class) {
4347 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4349 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4350 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4352 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4353 /* Check that the object is a vector too */
4354 int bounds_reg = alloc_preg (cfg);
4355 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4356 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4360 /* the is_null_bb target simply copies the input register to the output */
4361 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4363 } else if (mono_class_is_nullable (klass)) {
4364 g_assert (!context_used);
4365 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4366 /* the is_null_bb target simply copies the input register to the output */
4367 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: a single class comparison suffices */
4369 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4370 g_assert (!context_used);
4371 /* the remoting code is broken, access the class for now */
4372 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4373 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4375 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4376 cfg->exception_ptr = klass;
4379 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4385 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: walk the class hierarchy */
4387 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4388 /* the is_null_bb target simply copies the input register to the output */
4389 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result is null */
4394 MONO_START_BB (cfg, false_bb);
4396 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4397 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4399 MONO_START_BB (cfg, is_null_bb);
4401 MONO_START_BB (cfg, end_bb);
/* Implements the internal CEE_MONO_CISINST opcode (isinst with remoting-proxy
 * awareness). Result codes are described in the comment below. */
4407 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4409 /* This opcode takes as input an object reference and a class, and returns:
4410 0) if the object is an instance of the class,
4411 1) if the object is not instance of the class,
4412 2) if the object is a proxy whose type cannot be determined */
4415 #ifndef DISABLE_REMOTING
4416 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4418 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4420 int obj_reg = src->dreg;
4421 int dreg = alloc_ireg (cfg);
4423 #ifndef DISABLE_REMOTING
4424 int klass_reg = alloc_preg (cfg);
4427 NEW_BBLOCK (cfg, true_bb);
4428 NEW_BBLOCK (cfg, false_bb);
4429 NEW_BBLOCK (cfg, end_bb);
4430 #ifndef DISABLE_REMOTING
4431 NEW_BBLOCK (cfg, false2_bb);
4432 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance */
4435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4436 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4438 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4439 #ifndef DISABLE_REMOTING
4440 NEW_BBLOCK (cfg, interface_fail_bb);
4443 tmp_reg = alloc_preg (cfg);
4444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4445 #ifndef DISABLE_REMOTING
/* On interface-check failure, a transparent proxy may still qualify */
4446 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4447 MONO_START_BB (cfg, interface_fail_bb);
4448 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4450 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info: type cannot be determined (result 2) */
4452 tmp_reg = alloc_preg (cfg);
4453 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4457 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4460 #ifndef DISABLE_REMOTING
4461 tmp_reg = alloc_preg (cfg);
4462 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4465 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For proxies, test against the remote proxy class instead */
4466 tmp_reg = alloc_preg (cfg);
4467 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4470 tmp_reg = alloc_preg (cfg);
4471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4475 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4476 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4478 MONO_START_BB (cfg, no_proxy_bb);
4480 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4482 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the result codes described above */
4486 MONO_START_BB (cfg, false_bb);
4488 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4491 #ifndef DISABLE_REMOTING
4492 MONO_START_BB (cfg, false2_bb);
4494 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4495 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4498 MONO_START_BB (cfg, true_bb);
4500 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4502 MONO_START_BB (cfg, end_bb);
4505 MONO_INST_NEW (cfg, ins, OP_ICONST);
4507 ins->type = STACK_I4;
/* Implements the internal CEE_MONO_CCASTCLASS opcode (castclass with
 * remoting-proxy awareness). Result codes are described in the comment below. */
4513 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4515 /* This opcode takes as input an object reference and a class, and returns:
4516 0) if the object is an instance of the class,
4517 1) if the object is a proxy whose type cannot be determined
4518 an InvalidCastException exception is thrown otherwhise*/
4521 #ifndef DISABLE_REMOTING
4522 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4524 MonoBasicBlock *ok_result_bb;
4526 int obj_reg = src->dreg;
4527 int dreg = alloc_ireg (cfg);
4528 int tmp_reg = alloc_preg (cfg);
4530 #ifndef DISABLE_REMOTING
4531 int klass_reg = alloc_preg (cfg);
4532 NEW_BBLOCK (cfg, end_bb);
4535 NEW_BBLOCK (cfg, ok_result_bb);
/* null always passes a castclass */
4537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4540 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4542 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4543 #ifndef DISABLE_REMOTING
4544 NEW_BBLOCK (cfg, interface_fail_bb);
4546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* On interface-check failure, only a transparent proxy may still qualify */
4547 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4548 MONO_START_BB (cfg, interface_fail_bb);
4549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4551 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot satisfy an interface cast */
4553 tmp_reg = alloc_preg (cfg);
4554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4556 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Result 1: proxy whose type cannot be determined */
4558 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4562 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4566 #ifndef DISABLE_REMOTING
4567 NEW_BBLOCK (cfg, no_proxy_bb);
4569 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4571 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For proxies, test against the remote proxy class instead */
4573 tmp_reg = alloc_preg (cfg);
4574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4577 tmp_reg = alloc_preg (cfg);
4578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4579 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4582 NEW_BBLOCK (cfg, fail_1_bb);
4584 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4586 MONO_START_BB (cfg, fail_1_bb);
4588 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4591 MONO_START_BB (cfg, no_proxy_bb);
/* Non-proxy object: ordinary castclass, throws on mismatch */
4593 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4595 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4599 MONO_START_BB (cfg, ok_result_bb);
4601 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4603 #ifndef DISABLE_REMOTING
4604 MONO_START_BB (cfg, end_bb);
4608 MONO_INST_NEW (cfg, ins, OP_ICONST);
4610 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit inline IR equivalent to mono_delegate_ctor (): allocate the delegate
 * object of class KLASS, store its target and method fields, and set up the
 * invoke trampoline.  METHOD is the delegate target method; VIRTUAL selects
 * the virtual-dispatch trampoline variant.
 *
4616 * Returns NULL and set the cfg exception on error.
 */
4618 static G_GNUC_UNUSED MonoInst*
4619 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4623 gpointer trampoline;
4624 MonoInst *obj, *method_ins, *tramp_ins;
4628 // FIXME reenable optimisation for virtual case
4633 MonoMethod *invoke = mono_get_delegate_invoke (klass);
/* Bail out when no virtual invoke thunk exists for this signature */
4636 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
/* Allocate the delegate instance itself */
4640 obj = handle_alloc (cfg, klass, FALSE, 0);
4644 /* Inline the contents of mono_delegate_ctor */
4646 /* Set target field */
4647 /* Optimize away setting of NULL target */
4648 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4649 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a reference into a heap object: emit a write barrier if the GC requires it */
4650 if (cfg->gen_write_barriers) {
4651 dreg = alloc_preg (cfg);
4652 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4653 emit_write_barrier (cfg, ptr, target);
4657 /* Set method field */
4658 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4659 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
/*
4662 * To avoid looking up the compiled code belonging to the target method
4663 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4664 * store it, and we fill it after the method has been compiled.
 */
4666 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4667 MonoInst *code_slot_ins;
4670 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain method -> code-slot hash under the domain lock */
4672 domain = mono_domain_get ();
4673 mono_domain_lock (domain);
4674 if (!domain_jit_info (domain)->method_code_hash)
4675 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4676 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4678 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4679 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4681 mono_domain_unlock (domain);
/* Under AOT the slot address is resolved at load time via a patch info entry */
4683 if (cfg->compile_aot)
4684 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4686 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: delegate trampoline referenced through a patch; JIT: created eagerly below */
4691 if (cfg->compile_aot) {
4692 MonoDelegateClassMethodPair *del_tramp;
4694 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4695 del_tramp->klass = klass;
4696 del_tramp->method = context_used ? NULL : method;
4697 del_tramp->virtual = virtual;
4698 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4701 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4703 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4704 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4707 /* Set invoke_impl field */
4709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-AOT: tramp_ins points at a MonoDelegateTrampInfo; copy its fields into the delegate */
4711 dreg = alloc_preg (cfg);
4712 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4713 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4715 dreg = alloc_preg (cfg);
4716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4717 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4720 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the mono_array_new_va () icall wrapper to allocate
 * a multi-dimensional array of rank RANK; SP holds the dimension arguments.
 */
4726 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4728 MonoJitICallInfo *info;
4730 /* Need to register the icall so it gets an icall wrapper */
4731 info = mono_get_array_new_va_icall (rank);
4733 cfg->flags |= MONO_CFG_HAS_VARARGS;
4735 /* mono_array_new_va () needs a vararg calling convention */
4736 cfg->disable_llvm = TRUE;
4738 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4739 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Prepend an OP_LOAD_GOTADDR instruction to the entry basic block so
 * cfg->got_var holds the GOT address, and add a dummy use in the exit block
 * to keep the variable alive.  No-op if there is no got_var or it has already
 * been allocated.
 */
4743 mono_emit_load_got_addr (MonoCompile *cfg)
4745 MonoInst *getaddr, *dummy_use;
4747 if (!cfg->got_var || cfg->got_var_allocated)
4750 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4751 getaddr->cil_code = cfg->header->code;
4752 getaddr->dreg = cfg->got_var->dreg;
4754 /* Add it to the start of the first bblock */
4755 if (cfg->bb_entry->code) {
/* Link in front of the existing instruction chain */
4756 getaddr->next = cfg->bb_entry->code;
4757 cfg->bb_entry->code = getaddr;
4760 MONO_ADD_INS (cfg->bb_entry, getaddr);
4762 cfg->got_var_allocated = TRUE;
/*
4765 * Add a dummy use to keep the got_var alive, since real uses might
4766 * only be generated by the back ends.
4767 * Add it to end_bblock, so the variable's lifetime covers the whole
4769 * It would be better to make the usage of the got var explicit in all
4770 * cases when the backend needs it (i.e. calls, throw etc.), so this
4771 * wouldn't be needed.
 */
4773 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4774 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit in IL bytes; initialized lazily in mono_method_check_inlining ()
 * from the MONO_INLINELIMIT environment variable or INLINE_LENGTH_LIMIT. */
4777 static int inline_limit;
4778 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into CFG, checking compile
 * flags, method implementation attributes, the IL size limit, soft-float
 * restrictions, and whether the declaring class' cctor can be dealt with.
 */
4781 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4783 MonoMethodHeaderSummary header;
4785 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4786 MonoMethodSignature *sig = mono_method_signature (method);
/* Per-compile switches that disable inlining outright */
4790 if (cfg->disable_inline)
4792 if (cfg->generic_sharing_context)
/* Guard against runaway recursive inlining */
4795 if (cfg->inline_depth > 10)
4798 #ifdef MONO_ARCH_HAVE_LMF_OPS
4799 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4800 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4801 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* No summarizable IL body (e.g. no header) -> not inlinable */
4806 if (!mono_method_get_header_summary (method, &header))
4809 /*runtime, icall and pinvoke are checked by summary call*/
4810 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4811 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4812 (mono_class_is_marshalbyref (method->klass)) ||
4816 /* also consider num_locals? */
4817 /* Do the size check early to avoid creating vtables */
4818 if (!inline_limit_inited) {
/* NOTE(review): g_getenv () is called twice here; caching the result would avoid the second lookup */
4819 if (g_getenv ("MONO_INLINELIMIT"))
4820 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4822 inline_limit = INLINE_LENGTH_LIMIT;
4823 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the IL size limit */
4825 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
/*
4829 * if we can initialize the class of the method right away, we do,
4830 * otherwise we don't allow inlining if the class needs initialization,
4831 * since it would mean inserting a call to mono_runtime_class_init()
4832 * inside the inlined code
 */
4834 if (!(cfg->opt & MONO_OPT_SHARED)) {
4835 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4836 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4837 vtable = mono_class_vtable (cfg->domain, method->klass);
4840 if (!cfg->compile_aot)
4841 mono_runtime_class_init (vtable);
4842 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4843 if (cfg->run_cctors && method->klass->has_cctor) {
4844 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4845 if (!method->klass->runtime_info)
4846 /* No vtable created yet */
4848 vtable = mono_class_vtable (cfg->domain, method->klass);
4851 /* This makes so that inline cannot trigger */
4852 /* .cctors: too many apps depend on them */
4853 /* running with a specific order... */
4854 if (! vtable->initialized)
4856 mono_runtime_class_init (vtable);
4858 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4859 if (!method->klass->runtime_info)
4860 /* No vtable created yet */
4862 vtable = mono_class_vtable (cfg->domain, method->klass);
4865 if (!vtable->initialized)
/*
4870 * If we're compiling for shared code
4871 * the cctor will need to be run at aot method load time, for example,
4872 * or at the end of the compilation of the inlining method.
 */
4874 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
/*
4879 * CAS - do not inline methods with declarative security
4880 * Note: this has to be before any possible return TRUE;
 */
4882 if (mono_security_method_has_declsec (method))
/* Soft-float targets cannot inline methods taking or returning R4 */
4885 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4886 if (mono_arch_is_soft_float ()) {
4888 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4890 for (i = 0; i < sig->param_count; ++i)
4891 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Methods explicitly blacklisted for this compile */
4896 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access on KLASS, performed from METHOD,
 * requires emitting code to run the class constructor first.
 */
4903 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4905 if (!cfg->compile_aot) {
/* Already initialized at JIT time: no runtime check needed */
4907 if (vtable->initialized)
/* beforefieldinit classes may run the cctor at any time; skip only inside the class itself */
4911 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4912 if (cfg->method == method)
4916 if (!mono_class_needs_cctor_run (klass, method))
4919 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4920 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR of element type KLASS, with an optional bounds check (BCHECK).
 * Returns the address instruction (type STACK_MP).
 */
4927 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4931 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* gsharedvt: the element size is only known at runtime (see rgctx path below) */
4934 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4937 mono_class_init (klass);
4938 size = mono_class_array_element_size (klass);
4941 mult_reg = alloc_preg (cfg);
4942 array_reg = arr->dreg;
4943 index_reg = index->dreg;
4945 #if SIZEOF_REGISTER == 8
4946 /* The array reg is 64 bits but the index reg is only 32 */
4947 if (COMPILE_LLVM (cfg)) {
/* The LLVM backend performs the extension itself */
4949 index2_reg = index_reg;
4951 index2_reg = alloc_preg (cfg);
4952 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4955 if (index->type == STACK_I8) {
4956 index2_reg = alloc_preg (cfg);
4957 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4959 index2_reg = index_reg;
4964 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold power-of-two scale and vector offset into one LEA */
4966 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4967 if (size == 1 || size == 2 || size == 4 || size == 8) {
4968 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4970 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4971 ins->klass = mono_class_get_element_class (klass);
4972 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
4978 add_reg = alloc_ireg_mp (cfg);
4981 MonoInst *rgctx_ins;
/* gsharedvt: fetch the element size from the runtime generic context */
4984 g_assert (cfg->generic_sharing_context);
4985 context_used = mini_class_check_context_used (cfg, klass);
4986 g_assert (context_used);
4987 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4988 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4990 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4992 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4993 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4994 ins->klass = mono_class_get_element_class (klass);
4995 ins->type = STACK_MP;
4996 MONO_ADD_INS (cfg->cbb, ins);
5001 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * rank-2 array ARR of element type KLASS, with per-dimension lower-bound and
 * length range checks.  Only built when the target has real mul opcodes.
 */
5003 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5005 int bounds_reg = alloc_preg (cfg);
5006 int add_reg = alloc_ireg_mp (cfg);
5007 int mult_reg = alloc_preg (cfg);
5008 int mult2_reg = alloc_preg (cfg);
5009 int low1_reg = alloc_preg (cfg);
5010 int low2_reg = alloc_preg (cfg);
5011 int high1_reg = alloc_preg (cfg);
5012 int high2_reg = alloc_preg (cfg);
5013 int realidx1_reg = alloc_preg (cfg);
5014 int realidx2_reg = alloc_preg (cfg);
5015 int sum_reg = alloc_preg (cfg);
5016 int index1, index2, tmpreg;
5020 mono_class_init (klass);
5021 size = mono_class_array_element_size (klass);
5023 index1 = index_ins1->dreg;
5024 index2 = index_ins2->dreg;
5026 #if SIZEOF_REGISTER == 8
5027 /* The array reg is 64 bits but the index reg is only 32 */
5028 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes to pointer width */
5031 tmpreg = alloc_preg (cfg);
5032 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5034 tmpreg = alloc_preg (cfg);
5035 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5039 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5043 /* range checking */
5044 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5045 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound; throw if (unsigned) realidx >= length */
5047 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5048 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5049 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5050 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5051 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5052 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5053 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: its bounds entry lives sizeof (MonoArrayBounds) past entry 0 */
5055 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5056 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5057 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5058 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5059 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5060 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5061 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * length2 + realidx2) * size) + offsetof (MonoArray, vector) */
5063 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5064 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5065 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5066 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5067 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5069 ins->type = STACK_MP;
5071 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an array Address () call: fast paths
 * for rank 1 (and rank 2 when OP_LMUL is available and intrinsics are on),
 * otherwise a call to the marshalled array-address helper method.
 */
5078 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5082 MonoMethod *addr_method;
/* For the setter the trailing value argument is not an index */
5085 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5088 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
5090 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5091 /* emit_ldelema_2 depends on OP_LMUL */
5092 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
5093 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Generic fallback: call the generated Address () helper */
5097 element_size = mono_class_array_element_size (cmethod->klass->element_class);
5098 addr_method = mono_marshal_get_array_address (rank, element_size);
5099 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy ()) */
5104 static MonoBreakPolicy
5105 always_insert_breakpoint (MonoMethod *method)
5107 return MONO_BREAK_POLICY_ALWAYS;
5110 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
/*
5113 * mono_set_break_policy:
5114 * policy_callback: the new callback function
 *
5116 * Allow embedders to decide whether to actually obey breakpoint instructions
5117 * (both break IL instructions and Debugger.Break () method calls), for example
5118 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5119 * untrusted or semi-trusted code.
 *
5121 * @policy_callback will be called every time a break point instruction needs to
5122 * be inserted with the method argument being the method that calls Debugger.Break()
5123 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5124 * if it wants the breakpoint to not be effective in the given method.
5125 * #MONO_BREAK_POLICY_ALWAYS is the default.
 */
5128 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy */
5130 if (policy_callback)
5131 break_policy_func = policy_callback;
5133 break_policy_func = always_insert_breakpoint;
/* Ask the registered break policy whether METHOD should get real breakpoints.
 * NOTE(review): the name is misspelled ("brekpoint") but callers use it as-is,
 * so it is kept to preserve the interface. */
5137 should_insert_brekpoint (MonoMethod *method) {
5138 switch (break_policy_func (method)) {
5139 case MONO_BREAK_POLICY_ALWAYS:
5141 case MONO_BREAK_POLICY_NEVER:
/* mdb-only breakpoints are no longer honored */
5143 case MONO_BREAK_POLICY_ON_DBG:
5144 g_warning ("mdb no longer supported");
5147 g_warning ("Incorrect value returned from break policy callback");
5152 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
5154 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5156 MonoInst *addr, *store, *load;
5157 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5159 /* the bounds check is already done by the callers */
5160 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* SetGenericValueImpl: copy *args [2] into the element (barrier for ref elements) */
5162 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5163 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5164 if (mini_type_is_reference (cfg, fsig->params [2]))
5165 emit_write_barrier (cfg, addr, load);
/* GetGenericValueImpl: copy the element into *args [2] */
5167 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5168 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS behaves as a reference type for array-store purposes */
5175 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5177 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR storing SP [2] into element SP [1] of array SP [0] of element
 * type KLASS.  With SAFETY_CHECKS, non-null reference stores go through the
 * virtual stelemref helper which performs the array covariance check.
 */
5181 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null reference needs the covariance check; null stores never fail it */
5183 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5184 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5185 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5186 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5187 MonoInst *iargs [3];
5190 mono_class_setup_vtable (obj_array);
5191 g_assert (helper->slot);
5193 if (sp [0]->type != STACK_OBJ)
5195 if (sp [2]->type != STACK_OBJ)
5202 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at compile time, use address + OP_STOREV_MEMBASE */
5206 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5209 // FIXME-VT: OP_ICONST optimization
5210 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5211 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5212 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the element offset directly into the store */
5213 } else if (sp [1]->opcode == OP_ICONST) {
5214 int array_reg = sp [0]->dreg;
5215 int index_reg = sp [1]->dreg;
5216 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5219 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5220 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* General case: compute the element address, then store (barrier for ref elements) */
5222 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5223 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5224 if (generic_class_is_reference_type (cfg, klass))
5225 emit_write_barrier (cfg, addr, sp [2]);
/* Implement Array.UnsafeStore/UnsafeLoad: element access without safety checks */
5232 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Element class comes from the value parameter (store) or the return type (load) */
5237 eklass = mono_class_from_mono_type (fsig->params [2]);
5239 eklass = mono_class_from_mono_type (fsig->ret);
5242 return emit_array_store (cfg, eklass, args, FALSE);
5244 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5245 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Whether Array.UnsafeMov may reinterpret a PARAM_KLASS value as a
 * RETURN_KLASS value: both must be reference-free valuetypes of the same
 * size, both struct or both primitive/enum, and neither may be R4/R8.
 */
5251 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5255 //Only allow for valuetypes
5256 if (!param_klass->valuetype || !return_klass->valuetype)
/* Reject types containing GC references: reinterpreting them would confuse the collector */
5260 if (param_klass->has_references || return_klass->has_references)
5263 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
/* FIX: restore '&param_klass' which had been mangled into a pilcrow by an '&para;' entity substitution */
5264 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5265 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
/* Floats have different register/ABI treatment, so exclude them */
5268 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5269 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5272 //And have the same size
5273 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/* Implement Array.UnsafeMov: reinterpret ARGS [0] as the return type when the
 * two types (or their element types, for rank-1 arrays) are mov-compatible. */
5279 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5281 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5282 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5284 //Valuetypes that are semantically equivalent
5285 if (is_unsafe_mov_compatible (param_klass, return_klass))
5288 //Arrays of valuetypes that are semantically equivalent
5289 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/* Intrinsic expansion for constructor calls: SIMD (when enabled) and native types */
5296 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5298 #ifdef MONO_ARCH_SIMD_INTRINSICS
5299 MonoInst *ins = NULL;
5301 if (cfg->opt & MONO_OPT_SIMD) {
5302 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5308 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER instruction of the given MONO_MEMORY_BARRIER_* kind */
5312 emit_memory_barrier (MonoCompile *cfg, int kind)
5314 MonoInst *ins = NULL;
5315 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5316 MONO_ADD_INS (cfg->cbb, ins);
5317 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics used only with the LLVM backend: System.Math Sin/Cos/Sqrt/Abs
 * lowered to opcodes, and Min/Max lowered to CMOV opcodes when enabled.
 */
5323 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5325 MonoInst *ins = NULL;
5328 /* The LLVM backend supports these intrinsics */
5329 if (cmethod->klass == mono_defaults.math_class) {
5330 if (strcmp (cmethod->name, "Sin") == 0) {
5332 } else if (strcmp (cmethod->name, "Cos") == 0) {
5334 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for doubles; other overloads get a normal call */
5336 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5341 MONO_INST_NEW (cfg, ins, opcode);
5342 ins->type = STACK_R8;
5343 ins->dreg = mono_alloc_freg (cfg);
5344 ins->sreg1 = args [0]->dreg;
5345 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max become conditional-move opcodes when the CMOV optimization is on */
5349 if (cfg->opt & MONO_OPT_CMOV) {
5350 if (strcmp (cmethod->name, "Min") == 0) {
5351 if (fsig->params [0]->type == MONO_TYPE_I4)
5353 if (fsig->params [0]->type == MONO_TYPE_U4)
5354 opcode = OP_IMIN_UN;
5355 else if (fsig->params [0]->type == MONO_TYPE_I8)
5357 else if (fsig->params [0]->type == MONO_TYPE_U8)
5358 opcode = OP_LMIN_UN;
5359 } else if (strcmp (cmethod->name, "Max") == 0) {
5360 if (fsig->params [0]->type == MONO_TYPE_I4)
5362 if (fsig->params [0]->type == MONO_TYPE_U4)
5363 opcode = OP_IMAX_UN;
5364 else if (fsig->params [0]->type == MONO_TYPE_I8)
5366 else if (fsig->params [0]->type == MONO_TYPE_U8)
5367 opcode = OP_LMAX_UN;
5372 MONO_INST_NEW (cfg, ins, opcode);
/* NOTE(review): only I4 yields STACK_I4 here, so U4 takes the STACK_I8 branch — verify intended */
5373 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5374 ins->dreg = mono_alloc_ireg (cfg);
5375 ins->sreg1 = args [0]->dreg;
5376 ins->sreg2 = args [1]->dreg;
5377 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to apply in shared (generic-sharing) code:
 * currently the Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers.
 */
5385 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5387 if (cmethod->klass == mono_defaults.array_class) {
5388 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5389 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5390 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5391 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5392 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5393 return emit_array_unsafe_mov (cfg, fsig, args);
5400 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5402 MonoInst *ins = NULL;
5404 static MonoClass *runtime_helpers_class = NULL;
5405 if (! runtime_helpers_class)
5406 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5407 "System.Runtime.CompilerServices", "RuntimeHelpers");
5409 if (cmethod->klass == mono_defaults.string_class) {
5410 if (strcmp (cmethod->name, "get_Chars") == 0) {
5411 int dreg = alloc_ireg (cfg);
5412 int index_reg = alloc_preg (cfg);
5413 int mult_reg = alloc_preg (cfg);
5414 int add_reg = alloc_preg (cfg);
5416 #if SIZEOF_REGISTER == 8
5417 /* The array reg is 64 bits but the index reg is only 32 */
5418 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5420 index_reg = args [1]->dreg;
5422 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5424 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5425 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5426 add_reg = ins->dreg;
5427 /* Avoid a warning */
5429 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5433 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5434 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5435 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5437 type_from_op (ins, NULL, NULL);
5439 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5440 int dreg = alloc_ireg (cfg);
5441 /* Decompose later to allow more optimizations */
5442 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5443 ins->type = STACK_I4;
5444 ins->flags |= MONO_INST_FAULT;
5445 cfg->cbb->has_array_access = TRUE;
5446 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5449 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5450 int mult_reg = alloc_preg (cfg);
5451 int add_reg = alloc_preg (cfg);
5453 /* The corlib functions check for oob already. */
5454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5455 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5456 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5457 return cfg->cbb->last_ins;
5460 } else if (cmethod->klass == mono_defaults.object_class) {
5462 if (strcmp (cmethod->name, "GetType") == 0) {
5463 int dreg = alloc_ireg_ref (cfg);
5464 int vt_reg = alloc_preg (cfg);
5465 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5466 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5467 type_from_op (ins, NULL, NULL);
5470 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5471 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5472 int dreg = alloc_ireg (cfg);
5473 int t1 = alloc_ireg (cfg);
5475 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5476 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5477 ins->type = STACK_I4;
5481 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5482 MONO_INST_NEW (cfg, ins, OP_NOP);
5483 MONO_ADD_INS (cfg->cbb, ins);
5487 } else if (cmethod->klass == mono_defaults.array_class) {
5488 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5489 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5491 #ifndef MONO_BIG_ARRAYS
5493 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5496 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5497 int dreg = alloc_ireg (cfg);
5498 int bounds_reg = alloc_ireg_mp (cfg);
5499 MonoBasicBlock *end_bb, *szarray_bb;
5500 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5502 NEW_BBLOCK (cfg, end_bb);
5503 NEW_BBLOCK (cfg, szarray_bb);
5505 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5506 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5508 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5509 /* Non-szarray case */
5511 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5512 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5514 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5515 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5516 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5517 MONO_START_BB (cfg, szarray_bb);
5520 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5521 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5523 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5524 MONO_START_BB (cfg, end_bb);
5526 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5527 ins->type = STACK_I4;
5533 if (cmethod->name [0] != 'g')
5536 if (strcmp (cmethod->name, "get_Rank") == 0) {
5537 int dreg = alloc_ireg (cfg);
5538 int vtable_reg = alloc_preg (cfg);
5539 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5540 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5541 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5542 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5543 type_from_op (ins, NULL, NULL);
5546 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5547 int dreg = alloc_ireg (cfg);
5549 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5550 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5551 type_from_op (ins, NULL, NULL);
5556 } else if (cmethod->klass == runtime_helpers_class) {
5558 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5559 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5563 } else if (cmethod->klass == mono_defaults.thread_class) {
5564 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5565 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5566 MONO_ADD_INS (cfg->cbb, ins);
5568 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5569 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5571 } else if (cmethod->klass == mono_defaults.monitor_class) {
5572 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5573 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5576 if (COMPILE_LLVM (cfg)) {
5578 * Pass the argument normally, the LLVM backend will handle the
5579 * calling convention problems.
5581 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5583 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5584 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5585 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5586 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5589 return (MonoInst*)call;
5590 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
5591 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5594 if (COMPILE_LLVM (cfg)) {
5596 * Pass the argument normally, the LLVM backend will handle the
5597 * calling convention problems.
5599 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5601 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5602 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5603 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5604 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5607 return (MonoInst*)call;
5609 } else if (strcmp (cmethod->name, "Exit") == 0) {
5612 if (COMPILE_LLVM (cfg)) {
5613 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5615 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5616 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5617 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5618 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5621 return (MonoInst*)call;
5624 } else if (cmethod->klass->image == mono_defaults.corlib &&
5625 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5626 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5629 #if SIZEOF_REGISTER == 8
5630 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5631 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
5632 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
5633 ins->dreg = mono_alloc_preg (cfg);
5634 ins->sreg1 = args [0]->dreg;
5635 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
5636 MONO_ADD_INS (cfg->cbb, ins);
5640 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5642 /* 64 bit reads are already atomic */
5643 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5644 load_ins->dreg = mono_alloc_preg (cfg);
5645 load_ins->inst_basereg = args [0]->dreg;
5646 load_ins->inst_offset = 0;
5647 MONO_ADD_INS (cfg->cbb, load_ins);
5649 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5656 if (strcmp (cmethod->name, "Increment") == 0) {
5657 MonoInst *ins_iconst;
5660 if (fsig->params [0]->type == MONO_TYPE_I4) {
5661 opcode = OP_ATOMIC_ADD_I4;
5662 cfg->has_atomic_add_i4 = TRUE;
5664 #if SIZEOF_REGISTER == 8
5665 else if (fsig->params [0]->type == MONO_TYPE_I8)
5666 opcode = OP_ATOMIC_ADD_I8;
5669 if (!mono_arch_opcode_supported (opcode))
5671 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5672 ins_iconst->inst_c0 = 1;
5673 ins_iconst->dreg = mono_alloc_ireg (cfg);
5674 MONO_ADD_INS (cfg->cbb, ins_iconst);
5676 MONO_INST_NEW (cfg, ins, opcode);
5677 ins->dreg = mono_alloc_ireg (cfg);
5678 ins->inst_basereg = args [0]->dreg;
5679 ins->inst_offset = 0;
5680 ins->sreg2 = ins_iconst->dreg;
5681 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5682 MONO_ADD_INS (cfg->cbb, ins);
5684 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5685 MonoInst *ins_iconst;
5688 if (fsig->params [0]->type == MONO_TYPE_I4) {
5689 opcode = OP_ATOMIC_ADD_I4;
5690 cfg->has_atomic_add_i4 = TRUE;
5692 #if SIZEOF_REGISTER == 8
5693 else if (fsig->params [0]->type == MONO_TYPE_I8)
5694 opcode = OP_ATOMIC_ADD_I8;
5697 if (!mono_arch_opcode_supported (opcode))
5699 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5700 ins_iconst->inst_c0 = -1;
5701 ins_iconst->dreg = mono_alloc_ireg (cfg);
5702 MONO_ADD_INS (cfg->cbb, ins_iconst);
5704 MONO_INST_NEW (cfg, ins, opcode);
5705 ins->dreg = mono_alloc_ireg (cfg);
5706 ins->inst_basereg = args [0]->dreg;
5707 ins->inst_offset = 0;
5708 ins->sreg2 = ins_iconst->dreg;
5709 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5710 MONO_ADD_INS (cfg->cbb, ins);
5712 } else if (strcmp (cmethod->name, "Add") == 0) {
5715 if (fsig->params [0]->type == MONO_TYPE_I4) {
5716 opcode = OP_ATOMIC_ADD_I4;
5717 cfg->has_atomic_add_i4 = TRUE;
5719 #if SIZEOF_REGISTER == 8
5720 else if (fsig->params [0]->type == MONO_TYPE_I8)
5721 opcode = OP_ATOMIC_ADD_I8;
5724 if (!mono_arch_opcode_supported (opcode))
5726 MONO_INST_NEW (cfg, ins, opcode);
5727 ins->dreg = mono_alloc_ireg (cfg);
5728 ins->inst_basereg = args [0]->dreg;
5729 ins->inst_offset = 0;
5730 ins->sreg2 = args [1]->dreg;
5731 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5732 MONO_ADD_INS (cfg->cbb, ins);
5735 else if (strcmp (cmethod->name, "Exchange") == 0) {
5737 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5739 if (fsig->params [0]->type == MONO_TYPE_I4) {
5740 opcode = OP_ATOMIC_EXCHANGE_I4;
5741 cfg->has_atomic_exchange_i4 = TRUE;
5743 #if SIZEOF_REGISTER == 8
5744 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5745 (fsig->params [0]->type == MONO_TYPE_I))
5746 opcode = OP_ATOMIC_EXCHANGE_I8;
5748 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5749 opcode = OP_ATOMIC_EXCHANGE_I4;
5750 cfg->has_atomic_exchange_i4 = TRUE;
5756 if (!mono_arch_opcode_supported (opcode))
5759 MONO_INST_NEW (cfg, ins, opcode);
5760 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5761 ins->inst_basereg = args [0]->dreg;
5762 ins->inst_offset = 0;
5763 ins->sreg2 = args [1]->dreg;
5764 MONO_ADD_INS (cfg->cbb, ins);
5766 switch (fsig->params [0]->type) {
5768 ins->type = STACK_I4;
5772 ins->type = STACK_I8;
5774 case MONO_TYPE_OBJECT:
5775 ins->type = STACK_OBJ;
5778 g_assert_not_reached ();
5781 if (cfg->gen_write_barriers && is_ref)
5782 emit_write_barrier (cfg, args [0], args [1]);
5784 else if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5786 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5787 if (fsig->params [1]->type == MONO_TYPE_I4)
5789 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5790 size = sizeof (gpointer);
5791 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5794 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5796 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5797 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5798 ins->sreg1 = args [0]->dreg;
5799 ins->sreg2 = args [1]->dreg;
5800 ins->sreg3 = args [2]->dreg;
5801 ins->type = STACK_I4;
5802 MONO_ADD_INS (cfg->cbb, ins);
5803 cfg->has_atomic_cas_i4 = TRUE;
5804 } else if (size == 8) {
5805 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5807 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5808 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5809 ins->sreg1 = args [0]->dreg;
5810 ins->sreg2 = args [1]->dreg;
5811 ins->sreg3 = args [2]->dreg;
5812 ins->type = STACK_I8;
5813 MONO_ADD_INS (cfg->cbb, ins);
5815 /* g_assert_not_reached (); */
5817 if (cfg->gen_write_barriers && is_ref)
5818 emit_write_barrier (cfg, args [0], args [1]);
5820 else if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5821 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
5825 } else if (cmethod->klass->image == mono_defaults.corlib &&
5826 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5827 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
5830 if (!strcmp (cmethod->name, "Read")) {
5832 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5834 if (fsig->params [0]->type == MONO_TYPE_I1)
5835 opcode = OP_ATOMIC_LOAD_I1;
5836 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
5837 opcode = OP_ATOMIC_LOAD_U1;
5838 else if (fsig->params [0]->type == MONO_TYPE_I2)
5839 opcode = OP_ATOMIC_LOAD_I2;
5840 else if (fsig->params [0]->type == MONO_TYPE_U2)
5841 opcode = OP_ATOMIC_LOAD_U2;
5842 else if (fsig->params [0]->type == MONO_TYPE_I4)
5843 opcode = OP_ATOMIC_LOAD_I4;
5844 else if (fsig->params [0]->type == MONO_TYPE_U4)
5845 opcode = OP_ATOMIC_LOAD_U4;
5846 #if SIZEOF_REGISTER == 8
5847 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
5848 opcode = OP_ATOMIC_LOAD_I8;
5849 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
5850 opcode = OP_ATOMIC_LOAD_U8;
5852 else if (fsig->params [0]->type == MONO_TYPE_I)
5853 opcode = OP_ATOMIC_LOAD_I4;
5854 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
5855 opcode = OP_ATOMIC_LOAD_U4;
5859 if (!mono_arch_opcode_supported (opcode))
5862 MONO_INST_NEW (cfg, ins, opcode);
5863 ins->dreg = mono_alloc_ireg (cfg);
5864 ins->sreg1 = args [0]->dreg;
5865 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
5866 MONO_ADD_INS (cfg->cbb, ins);
5870 if (!strcmp (cmethod->name, "Write")) {
5872 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5874 if (fsig->params [0]->type == MONO_TYPE_I1)
5875 opcode = OP_ATOMIC_STORE_I1;
5876 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
5877 opcode = OP_ATOMIC_STORE_U1;
5878 else if (fsig->params [0]->type == MONO_TYPE_I2)
5879 opcode = OP_ATOMIC_STORE_I2;
5880 else if (fsig->params [0]->type == MONO_TYPE_U2)
5881 opcode = OP_ATOMIC_STORE_U2;
5882 else if (fsig->params [0]->type == MONO_TYPE_I4)
5883 opcode = OP_ATOMIC_STORE_I4;
5884 else if (fsig->params [0]->type == MONO_TYPE_U4)
5885 opcode = OP_ATOMIC_STORE_U4;
5886 #if SIZEOF_REGISTER == 8
5887 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
5888 opcode = OP_ATOMIC_STORE_I8;
5889 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
5890 opcode = OP_ATOMIC_STORE_U8;
5892 else if (fsig->params [0]->type == MONO_TYPE_I)
5893 opcode = OP_ATOMIC_STORE_I4;
5894 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
5895 opcode = OP_ATOMIC_STORE_U4;
5899 if (!mono_arch_opcode_supported (opcode))
5902 MONO_INST_NEW (cfg, ins, opcode);
5903 ins->dreg = args [0]->dreg;
5904 ins->sreg1 = args [1]->dreg;
5905 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
5906 MONO_ADD_INS (cfg->cbb, ins);
5908 if (cfg->gen_write_barriers && is_ref)
5909 emit_write_barrier (cfg, args [0], args [1]);
5915 } else if (cmethod->klass->image == mono_defaults.corlib) {
5916 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5917 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5918 if (should_insert_brekpoint (cfg->method)) {
5919 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5921 MONO_INST_NEW (cfg, ins, OP_NOP);
5922 MONO_ADD_INS (cfg->cbb, ins);
5926 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5927 && strcmp (cmethod->klass->name, "Environment") == 0) {
5929 EMIT_NEW_ICONST (cfg, ins, 1);
5931 EMIT_NEW_ICONST (cfg, ins, 0);
5935 } else if (cmethod->klass == mono_defaults.math_class) {
5937 * There is general branches code for Min/Max, but it does not work for
5939 * http://everything2.com/?node_id=1051618
5941 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5942 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5944 MonoJumpInfoToken *ji;
5947 cfg->disable_llvm = TRUE;
5949 if (args [0]->opcode == OP_GOT_ENTRY) {
5950 pi = args [0]->inst_p1;
5951 g_assert (pi->opcode == OP_PATCH_INFO);
5952 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5955 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5956 ji = args [0]->inst_p0;
5959 NULLIFY_INS (args [0]);
5962 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5963 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5964 ins->dreg = mono_alloc_ireg (cfg);
5966 ins->inst_p0 = mono_string_to_utf8 (s);
5967 MONO_ADD_INS (cfg->cbb, ins);
5972 #ifdef MONO_ARCH_SIMD_INTRINSICS
5973 if (cfg->opt & MONO_OPT_SIMD) {
5974 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5980 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5984 if (COMPILE_LLVM (cfg)) {
5985 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5990 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5994 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirects selected method calls to JIT-internal replacements.  The only
 *   visible case: String.InternalAllocateStr is rerouted to the GC's managed
 *   allocator when allocation profiling is off and shared (AOT-shared) code
 *   is not being generated.
 *   NOTE(review): this excerpt is missing several original lines (e.g. the
 *   fall-through return path) — confirm against the full source.
 */
5997 inline static MonoInst*
5998 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5999 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6001 if (method->klass == mono_defaults.string_class) {
6002 /* managed string allocation support */
6003 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6004 MonoInst *iargs [2];
6005 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6006 MonoMethod *managed_alloc = NULL;
6008 g_assert (vtable); /* Should not fail since it is System.String */
/* Cross builds cannot ask the target GC for its managed allocator. */
6009 #ifndef MONO_CROSS_COMPILE
6010 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
6014 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6015 iargs [1] = args [0];
6016 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Spill the call arguments on the evaluation stack (SP) into newly created
 *   local vars and point cfg->args at them, so the inlined body sees proper
 *   argument MonoInsts.  For instance methods the `this` argument's type is
 *   derived from the stack entry, the rest from the signature.
 */
6023 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6025 MonoInst *store, *temp;
6028 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6029 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6032 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6033 * would be different than the MonoInst's used to represent arguments, and
6034 * the ldelema implementation can't deal with that.
6035 * Solution: When ldelema is used on an inline argument, create a var for
6036 * it, emit ldelema on that var, and emit the saving code below in
6037 * inline_method () if needed.
6039 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6040 cfg->args [i] = temp;
6041 /* This uses cfg->args [i] which is set by the preceding line */
6042 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6043 store->cil_code = sp [0]->cil_code;
/*
 * Debug switches: when non-zero, inlining can be restricted by the
 * MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment variables
 * (see the check_inline_*_method_name_limit helpers below).
 */
6048 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6049 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6051 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug helper: returns whether CALLED_METHOD may be inlined, based on the
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.  The env var is read once
 *   and cached in a function-static; only methods whose full name starts
 *   with the limit string pass the check.
 */
6053 check_inline_called_method_name_limit (MonoMethod *called_method)
6056 static const char *limit = NULL;
6058 if (limit == NULL) {
6059 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6061 if (limit_string != NULL)
6062 limit = limit_string;
/* Empty limit means "no restriction" (handled in lines not visible here). */
6067 if (limit [0] != '\0') {
6068 char *called_method_name = mono_method_full_name (called_method, TRUE);
6070 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6071 g_free (called_method_name);
6073 //return (strncmp_result <= 0);
6074 return (strncmp_result == 0);
6081 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debug helper: mirror of check_inline_called_method_name_limit, but
 *   filters on the CALLER's full name via the
 *   MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var (read once, cached).
 */
6083 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6086 static const char *limit = NULL;
6088 if (limit == NULL) {
6089 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6090 if (limit_string != NULL) {
6091 limit = limit_string;
/* Empty limit means "no restriction" (handled in lines not visible here). */
6097 if (limit [0] != '\0') {
6098 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6100 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6101 g_free (caller_method_name);
6103 //return (strncmp_result <= 0);
6104 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes register/variable DREG to the zero value of
 *   RTYPE: NULL for pointers/references, 0 for integral types, 0.0 for
 *   floats (via a shared static r8_0 constant), and VZERO for value types
 *   (including generic insts and type vars known to be valuetypes).
 */
6112 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6114 static double r8_0 = 0.0;
6118 rtype = mini_replace_type (rtype);
6122 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6123 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6124 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6125 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6126 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6127 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6128 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6129 ins->type = STACK_R8;
/* Points at the function-static 0.0; the constant is never written. */
6130 ins->inst_p0 = (void*)&r8_0;
6132 MONO_ADD_INS (cfg->cbb, ins);
6133 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6134 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6135 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6136 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6137 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Fallback: treat anything else as a pointer-sized NULL. */
6139 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar, but emits OP_DUMMY_* placeholder initializations
 *   that keep the IR well-formed without generating real stores (used when
 *   locals init is disabled).  Falls back to a real init for types that
 *   have no dummy opcode.
 */
6144 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6148 rtype = mini_replace_type (rtype);
6152 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6153 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6154 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6155 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6156 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6157 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6158 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6159 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6160 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6161 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6162 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6163 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit the real zero-initialization. */
6165 emit_init_rvar (cfg, dreg, rtype);
6169 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of TYPE.  Under soft-float the value is
 *   built in a fresh dreg and stored via LOCSTORE; otherwise the local's own
 *   dreg is initialized directly — for real (INIT) or as a dummy.
 */
6171 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6173 MonoInst *var = cfg->locals [local];
6174 if (COMPILE_SOFT_FLOAT (cfg)) {
6176 int reg = alloc_dreg (cfg, var->type);
6177 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local. */
6178 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6181 emit_init_rvar (cfg, var->dreg, type);
6183 emit_dummy_init_rvar (cfg, var->dreg, type);
6190 * Return the cost of inlining CMETHOD.
/*
 * inline_method:
 *   Try to inline CMETHOD at IP with arguments SP.  Saves the parts of CFG
 *   that mono_method_to_ir mutates, runs IR generation for the callee into
 *   fresh start/end bblocks, then either links/merges the new blocks into
 *   the caller's CFG (success) or restores CFG untouched (abort).
 *   Returns the inlining cost (>= 0 on success; negative paths are in lines
 *   not visible in this excerpt).
 *   NOTE(review): many original lines (braces, early returns, else arms)
 *   are missing from this view — treat control flow below as indicative.
 */
6193 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6194 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6196 MonoInst *ins, *rvar = NULL;
6197 MonoMethodHeader *cheader;
6198 MonoBasicBlock *ebblock, *sbblock;
6200 MonoMethod *prev_inlined_method;
6201 MonoInst **prev_locals, **prev_args;
6202 MonoType **prev_arg_types;
6203 guint prev_real_offset;
6204 GHashTable *prev_cbb_hash;
6205 MonoBasicBlock **prev_cil_offset_to_bb;
6206 MonoBasicBlock *prev_cbb;
6207 unsigned char* prev_cil_start;
6208 guint32 prev_cil_offset_to_bb_len;
6209 MonoMethod *prev_current_method;
6210 MonoGenericContext *prev_generic_context;
6211 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6213 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names (see helpers above). */
6215 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6216 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6219 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6220 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6224 if (cfg->verbose_level > 2)
6225 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6227 if (!cmethod->inline_info) {
6228 cfg->stat_inlineable_methods++;
6229 cmethod->inline_info = 1;
6232 /* allocate local variables */
6233 cheader = mono_method_get_header (cmethod);
6235 if (cheader == NULL || mono_loader_get_last_error ()) {
6236 MonoLoaderError *error = mono_loader_get_last_error ();
6239 mono_metadata_free_mh (cheader);
6240 if (inline_always && error)
6241 mono_cfg_set_exception (cfg, error->exception_type);
6243 mono_loader_clear_error ();
6247 /* Must verify before creating locals as it can cause the JIT to assert. */
6248 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6249 mono_metadata_free_mh (cheader);
6253 /* allocate space to store the return value */
6254 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6255 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a locals array sized for the callee's header. */
6258 prev_locals = cfg->locals;
6259 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6260 for (i = 0; i < cheader->num_locals; ++i)
6261 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6263 /* allocate start and end blocks */
6264 /* This is needed so if the inline is aborted, we can clean up */
6265 NEW_BBLOCK (cfg, sbblock);
6266 sbblock->real_offset = real_offset;
6268 NEW_BBLOCK (cfg, ebblock);
6269 ebblock->block_num = cfg->num_bblocks++;
6270 ebblock->real_offset = real_offset;
/* Save every CFG field mono_method_to_ir will overwrite. */
6272 prev_args = cfg->args;
6273 prev_arg_types = cfg->arg_types;
6274 prev_inlined_method = cfg->inlined_method;
6275 cfg->inlined_method = cmethod;
6276 cfg->ret_var_set = FALSE;
6277 cfg->inline_depth ++;
6278 prev_real_offset = cfg->real_offset;
6279 prev_cbb_hash = cfg->cbb_hash;
6280 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6281 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6282 prev_cil_start = cfg->cil_start;
6283 prev_cbb = cfg->cbb;
6284 prev_current_method = cfg->current_method;
6285 prev_generic_context = cfg->generic_context;
6286 prev_ret_var_set = cfg->ret_var_set;
6287 prev_disable_inline = cfg->disable_inline;
/* A callvirt on a non-static method needs a virtual-call null check. */
6289 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6292 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6294 ret_var_set = cfg->ret_var_set;
/* Restore the caller's CFG state regardless of success. */
6296 cfg->inlined_method = prev_inlined_method;
6297 cfg->real_offset = prev_real_offset;
6298 cfg->cbb_hash = prev_cbb_hash;
6299 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6300 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6301 cfg->cil_start = prev_cil_start;
6302 cfg->locals = prev_locals;
6303 cfg->args = prev_args;
6304 cfg->arg_types = prev_arg_types;
6305 cfg->current_method = prev_current_method;
6306 cfg->generic_context = prev_generic_context;
6307 cfg->ret_var_set = prev_ret_var_set;
6308 cfg->disable_inline = prev_disable_inline;
6309 cfg->inline_depth --;
/* Cost threshold 60; inline_always forces acceptance. */
6311 if ((costs >= 0 && costs < 60) || inline_always) {
6312 if (cfg->verbose_level > 2)
6313 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6315 cfg->stat_inlined_methods++;
6317 /* always add some code to avoid block split failures */
6318 MONO_INST_NEW (cfg, ins, OP_NOP);
6319 MONO_ADD_INS (prev_cbb, ins);
6321 prev_cbb->next_bb = sbblock;
6322 link_bblock (cfg, prev_cbb, sbblock);
6325 * Get rid of the begin and end bblocks if possible to aid local
6328 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6330 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6331 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6333 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6334 MonoBasicBlock *prev = ebblock->in_bb [0];
6335 mono_merge_basic_blocks (cfg, prev, ebblock);
6337 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6338 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6339 cfg->cbb = prev_cbb;
6343 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessor paths that never set it. */
6349 for (i = 0; i < ebblock->in_count; ++i) {
6350 bb = ebblock->in_bb [i];
6352 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6355 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6363 *out_cbb = cfg->cbb;
6367 * If the inlined method contains only a throw, then the ret var is not
6368 * set, so set it to a dummy value.
6371 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6373 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6376 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: drop the new bblocks and clear any pending exception. */
6379 if (cfg->verbose_level > 2)
6380 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6381 cfg->exception_type = MONO_EXCEPTION_NONE;
6382 mono_loader_clear_error ();
6384 /* This gets rid of the newly added bblocks */
6385 cfg->cbb = prev_cbb;
6387 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6392 * Some of these comments may well be out-of-date.
6393 * Design decisions: we do a single pass over the IL code (and we do bblock
6394 * splitting/merging in the few cases when it's required: a back jump to an IL
6395 * address that was not already seen as bblock starting point).
6396 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6397 * Complex operations are decomposed in simpler ones right away. We need to let the
6398 * arch-specific code peek and poke inside this process somehow (except when the
6399 * optimizations can take advantage of the full semantic info of coarse opcodes).
6400 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6401 * MonoInst->opcode initially is the IL opcode or some simplification of that
6402 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6403 * opcode with value bigger than OP_LAST.
6404 * At this point the IR can be handed over to an interpreter, a dumb code generator
6405 * or to the optimizing code generator that will translate it to SSA form.
6407 * Profiling directed optimizations.
6408 * We may compile by default with few or no optimizations and instrument the code
6409 * or the user may indicate what methods to optimize the most either in a config file
6410 * or through repeated runs where the compiler applies offline the optimizations to
6411 * each method and then decides if it was worth it.
/*
 * Verification helpers used by mono_method_to_ir: each check jumps to the
 * UNVERIFIED (or TYPE_LOAD_ERROR) label on failure.  They rely on the
 * surrounding function's locals (sp, stack_start, header, num_args, ip, end).
 * All macro parameters are parenthesized to stay safe if an expression is
 * ever passed (previously CHECK_OPSIZE/CHECK_UNVERIFIABLE did not do this).
 */
6414 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6415 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6416 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6417 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6418 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6419 #define CHECK_OPSIZE(size) if (ip + (size) > end) UNVERIFIED
6420 #define CHECK_UNVERIFIABLE(cfg) if ((cfg)->unverifiable) UNVERIFIED
6421 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6423 /* offset from br.s -> br like opcodes */
6424 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   TRUE when the CIL offset at IP maps either to no recorded bblock start
 *   or to BB itself — i.e. IP does not begin a different basic block.
 */
6427 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6429 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6431 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Scan the CIL stream [start, end) once and create a basic block
 *   (GET_BBLOCK) at every branch target, every instruction following a
 *   branch, and every switch arm.  Blocks that contain a throw are marked
 *   out_of_line so they can be placed cold.
 *   NOTE(review): this excerpt drops the per-case `ip += n; break;` lines
 *   and the loop/error epilogue — advance amounts below are per ECMA-335
 *   operand sizes but cannot be confirmed from this view.
 */
6435 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6437 unsigned char *ip = start;
6438 unsigned char *target;
6441 MonoBasicBlock *bblock;
6442 const MonoOpcode *opcode;
6445 cli_addr = ip - start;
6446 i = mono_opcode_value ((const guint8 **)&ip, end);
6449 opcode = &mono_opcodes [i];
/* Advance ip by the opcode's operand size; branches also spawn bblocks. */
6450 switch (opcode->argument) {
6451 case MonoInlineNone:
6454 case MonoInlineString:
6455 case MonoInlineType:
6456 case MonoInlineField:
6457 case MonoInlineMethod:
6460 case MonoShortInlineR:
6467 case MonoShortInlineVar:
6468 case MonoShortInlineI:
6471 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the next instruction (+2). */
6472 target = start + cli_addr + 2 + (signed char)ip [1];
6473 GET_BBLOCK (cfg, bblock, target);
6476 GET_BBLOCK (cfg, bblock, ip);
6478 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the next instruction (+5). */
6479 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6480 GET_BBLOCK (cfg, bblock, target);
6483 GET_BBLOCK (cfg, bblock, ip);
6485 case MonoInlineSwitch: {
6486 guint32 n = read32 (ip + 1);
/* Fall-through address sits after the count word and n target words. */
6489 cli_addr += 5 + 4 * n;
6490 target = start + cli_addr;
6491 GET_BBLOCK (cfg, bblock, target);
6493 for (j = 0; j < n; ++j) {
6494 target = start + cli_addr + (gint32)read32 (ip);
6495 GET_BBLOCK (cfg, bblock, target);
6505 g_assert_not_reached ();
6508 if (i == CEE_THROW) {
6509 unsigned char *bb_start = ip - 1;
6511 /* Find the start of the bblock containing the throw */
6513 while ((bb_start >= start) && !bblock) {
6514 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Mark throw-containing blocks cold so they are emitted out of line. */
6518 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M.  Wrapper methods
 *   carry their data inline (then inflated with CONTEXT when present);
 *   normal methods go through the metadata loader.  "Open" constructed
 *   methods are allowed — see mini_get_method for the checked variant.
 */
6528 static inline MonoMethod *
6529 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6533 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6534 method = mono_method_get_wrapper_data (m, token);
6537 method = mono_class_inflate_generic_method_checked (method, context, &error);
6538 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
6541 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, rejects methods whose declaring class is still an open
 *   constructed type (rejection path not visible in this excerpt).
 */
6547 static inline MonoMethod *
6548 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6550 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6552 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass relative to METHOD, inflating with CONTEXT.
 *   Wrapper methods store the class as wrapper data; otherwise the typespec
 *   is loaded and inflated (errors currently swallowed — see FIXME).
 *   The class is initialized before returning.
 */
6558 static inline MonoClass*
6559 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6564 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6565 klass = mono_method_get_wrapper_data (method, token);
6567 klass = mono_class_inflate_generic_class (klass, context);
6569 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6570 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6573 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature relative to METHOD.  Wrapper
 *   methods store the signature as wrapper data (inflated with CONTEXT);
 *   otherwise it is parsed from metadata.
 */
6577 static inline MonoMethodSignature*
6578 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6580 MonoMethodSignature *fsig;
6582 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6585 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6587 fsig = mono_inflate_generic_signature (fsig, context, &error);
6589 g_assert (mono_error_ok (&error));
6592 fsig = mono_metadata_parse_signature (method->klass->image, token);
6598 * Returns TRUE if the JIT should abort inlining because "callee"
6599 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands for a CALLER -> CALLEE call being compiled.
 *   For ECMA demands, code throwing a SecurityException is emitted before
 *   the call; other failures record a SECURITY_LINKDEMAND exception on CFG
 *   (unless a previous exception is already pending).
 */
6602 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Inlined calls with declarative security must abort inlining. */
6606 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6610 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6611 if (result == MONO_JIT_SECURITY_OK)
6614 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6615 /* Generate code to throw a SecurityException before the actual call/link */
6616 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6619 NEW_ICONST (cfg, args [0], 4);
6620 NEW_METHODCONST (cfg, args [1], caller);
6621 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6622 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6623 /* don't hide previous results */
6624 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6625 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily caching in a function-static) the managed
 *   SecurityManager.ThrowException(exception) method used to raise
 *   security exceptions from JITted code.
 */
6633 throw_exception (void)
6635 static MonoMethod *method = NULL;
6638 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6639 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that calls SecurityManager.ThrowException (EX) at the current
 *   compilation point, raising the preallocated exception at runtime.
 */
6646 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6648 MonoMethod *thrower = throw_exception ();
6651 EMIT_NEW_PCONST (cfg, args [0], ex);
6652 mono_emit_method_call (cfg, thrower, args, NULL);
6656 * Return the original method is a wrapper is specified. We can only access
6657 * the custom attributes from the original method.
/* Map a wrapper back to the wrapped method so its custom attributes can be read. */
6660 get_original_method (MonoMethod *method)
/* Not a wrapper: the method itself carries the attributes. */
6662 if (method->wrapper_type == MONO_WRAPPER_NONE)
6665 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6666 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6669 /* in other cases we need to find the original method */
6670 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 * CoreCLR security check for a field access from CALLER; if access is
 * denied, code throwing the returned exception is emitted at this point.
 */
6674 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6675 MonoBasicBlock *bblock, unsigned char *ip)
6677 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6678 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6680 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security check for a call from CALLER to CALLEE; if the call is
 * denied, code throwing the returned exception is emitted at this point.
 */
6684 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6685 MonoBasicBlock *bblock, unsigned char *ip)
6687 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6688 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6690 emit_throw_exception (cfg, ex);
6694 * Check that the IL instructions at ip are the array initialization
6695 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 * Pattern-match the IL sequence "dup; ldtoken <field>; call RuntimeHelpers.InitializeArray"
 * that follows a newarr, and if it matches return a pointer to the static
 * initialization data (or, for AOT, the RVA) plus its size/field token via
 * the out parameters.
 */
6698 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6701 * newarr[System.Int32]
6703 * ldtoken field valuetype ...
6704 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the token-table byte of the ldtoken operand; the call token follows at ip + 7. */
6706 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6708 guint32 token = read32 (ip + 7);
6709 guint32 field_token = read32 (ip + 2);
6710 guint32 field_index = field_token & 0xffffff;
6712 const char *data_ptr;
6714 MonoMethod *cmethod;
6715 MonoClass *dummy_class;
6716 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6720 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6724 *out_field_token = field_token;
6726 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize the real corlib RuntimeHelpers.InitializeArray. */
6729 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Element size depends on the array element type (elided cases below). */
6731 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6732 case MONO_TYPE_BOOLEAN:
6736 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6737 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6738 case MONO_TYPE_CHAR:
/* Bail if the computed data size exceeds the field's declared size. */
6755 if (size > mono_type_size (field->type, &dummy_align))
6758 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6759 if (!image_is_dynamic (method->klass->image)) {
6760 field_index = read32 (ip + 2) & 0xffffff;
6761 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6762 data_ptr = mono_image_rva_map (method->klass->image, rva);
6763 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6764 /* for aot code we do the lookup on load */
6765 if (aot && data_ptr)
6766 return GUINT_TO_POINTER (rva);
6768 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images: fetch the data straight from the field. */
6770 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 * Record an InvalidProgramException on CFG with a message naming METHOD and
 * disassembling the offending instruction at IP. The header is queued on
 * cfg->headers_to_free so it outlives this function.
 */
6778 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6780 char *method_fname = mono_method_full_name (method, TRUE);
6782 MonoMethodHeader *header = mono_method_get_header (method);
6784 if (header->code_size == 0)
6785 method_code = g_strdup ("method body is empty.");
6787 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6788 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6789 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* The formatted message owns copies; free the temporaries. */
6790 g_free (method_fname);
6791 g_free (method_code);
6792 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 * Record a pre-built exception object on CFG. The pointer is registered as
 * a GC root so the object is kept alive until compilation aborts with it.
 */
6796 set_exception_object (MonoCompile *cfg, MonoException *exception)
6798 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6799 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6800 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 * Emit a store of the stack top *SP into local N. When the value on the
 * stack is a freshly emitted constant and the store would be a plain
 * register move, retarget the constant's dreg instead of emitting a move.
 */
6804 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6807 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6808 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6809 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6810 /* Optimize reg-reg moves away */
6812 * Can't optimize other opcodes, since sp[0] might point to
6813 * the last ins of a decomposed opcode.
6815 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* General case: emit a regular local store. */
6817 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6822 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 * Try to fold a "ldloca <n>; initobj <type>" pair into a direct local
 * initialization, avoiding the address-taken local that inhibits many
 * optimizations. Returns the new IP on success (elided in this view).
 */
6825 static inline unsigned char *
6826 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6836 local = read16 (ip + 2);
/* The following instruction must be INITOBJ inside the current bblock. */
6840 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6841 /* From the INITOBJ case */
6842 token = read32 (ip + 2);
6843 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6844 CHECK_TYPELOAD (klass);
6845 type = mini_replace_type (&klass->byval_arg);
6846 emit_init_local (cfg, local, type, TRUE);
/* Walk the parent chain of CLASS checking whether it derives from System.Exception. */
6854 is_exception_class (MonoClass *class)
6857 if (class == mono_defaults.exception_class)
6859 class = class->parent;
6865 * is_jit_optimizer_disabled:
6867 * Determine whether M's assembly has a DebuggableAttribute with the
6868 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 * Decode M's assembly-level [Debuggable] attribute and cache the result on
 * the assembly. The memory barriers order the value store before the
 * inited-flag store so lock-free readers see a consistent pair.
 */
6871 is_jit_optimizer_disabled (MonoMethod *m)
6873 MonoAssembly *ass = m->klass->image->assembly;
6874 MonoCustomAttrInfo* attrs;
6875 static MonoClass *klass;
6877 gboolean val = FALSE;
/* Fast path: per-assembly cached answer. */
6880 if (ass->jit_optimizer_disabled_inited)
6881 return ass->jit_optimizer_disabled;
6884 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* DebuggableAttribute not resolvable: record FALSE and return (elided). */
6887 ass->jit_optimizer_disabled = FALSE;
6888 mono_memory_barrier ();
6889 ass->jit_optimizer_disabled_inited = TRUE;
6893 attrs = mono_custom_attrs_from_assembly (ass);
6895 for (i = 0; i < attrs->num_attrs; ++i) {
6896 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6899 MonoMethodSignature *sig;
/* Skip attributes that are not DebuggableAttribute. */
6901 if (!attr->ctor || attr->ctor->klass != klass)
6903 /* Decode the attribute. See reflection.c */
6904 len = attr->data_size;
6905 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
6906 g_assert (read16 (p) == 0x0001);
6909 // FIXME: Support named parameters
6910 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) constructor overload is handled here. */
6911 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6913 /* Two boolean arguments */
6917 mono_custom_attrs_free (attrs);
/* Publish value before the inited flag (paired barrier above). */
6920 ass->jit_optimizer_disabled = val;
6921 mono_memory_barrier ();
6922 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 * Decide whether a tail. call from METHOD to CMETHOD can actually be
 * emitted as a tail call. Starts from an arch-specific (or signature
 * equality) baseline, then vetoes cases where the callee could observe
 * the caller's stack or the calling convention does not permit it.
 */
6928 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6930 gboolean supported_tail_call;
6933 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6934 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* Fallback: signatures must match exactly and not return a struct. */
6936 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6939 for (i = 0; i < fsig->param_count; ++i) {
6940 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6941 /* These can point to the current method's stack */
6942 supported_tail_call = FALSE;
6944 if (fsig->hasthis && cmethod->klass->valuetype)
6945 /* this might point to the current method's stack */
6946 supported_tail_call = FALSE;
6947 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6948 supported_tail_call = FALSE;
6949 if (cfg->method->save_lmf)
6950 supported_tail_call = FALSE;
6951 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6952 supported_tail_call = FALSE;
/* Only plain CEE_CALL sites are eligible (not callvirt/calli). */
6953 if (call_opcode != CEE_CALL)
6954 supported_tail_call = FALSE;
6956 /* Debugging support */
6958 if (supported_tail_call) {
6959 if (!mono_debug_count ())
6960 supported_tail_call = FALSE;
6964 return supported_tail_call;
6967 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6968 * it to the thread local value based on the tls_offset field. Every other kind of access to
6969 * the field causes an assert.
/* TRUE iff FIELD is the corlib ThreadLocal`1.tlsdata field that the JIT intercepts. */
6972 is_magic_tls_access (MonoClassField *field)
6974 if (strcmp (field->name, "tlsdata"))
6976 if (strcmp (field->parent->name, "ThreadLocal`1"))
6978 return field->parent->image == mono_defaults.corlib;
6981 /* emits the code needed to access a managed tls var (like ThreadStatic)
6982 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6983 * pointer for the current thread.
6984 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 * Emit the inlined address computation for a managed TLS slot:
 * thread->static_data [(offset >> 24) - 1] + (offset & 0xffffff).
 * THREAD_INS holds the MonoInternalThread pointer; OFFSET_REG holds the
 * encoded tls offset. The resulting address Inst is returned (elided).
 */
6987 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6990 int static_data_reg, array_reg, dreg;
6991 int offset2_reg, idx_reg;
6992 // inlined access to the tls data
6993 // idx = (offset >> 24) - 1;
6994 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6995 static_data_reg = alloc_ireg (cfg);
6996 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1, then scaled by pointer size for the table lookup. */
6997 idx_reg = alloc_ireg (cfg);
6998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6999 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
7000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7001 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7002 array_reg = alloc_ireg (cfg);
7003 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* Low 24 bits are the byte offset within the chunk. */
7004 offset2_reg = alloc_ireg (cfg);
7005 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7006 dreg = alloc_ireg (cfg);
7007 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
7012 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7013 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 * Compute (and cache in *CACHED_TLS_ADDR, one temp per method) the address
 * of the tls var backing a ThreadLocal<T>.tlsdata access, using the
 * tls_offset field of the same object (THREAD_LOCAL holds its address).
 */
7016 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7018 MonoInst *load, *addr, *temp, *store, *thread_ins;
7019 MonoClassField *offset_field;
/* Fast path: the address was already computed for this method. */
7021 if (*cached_tls_addr) {
7022 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7025 thread_ins = mono_get_thread_intrinsic (cfg);
7026 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the encoded tls offset from the ThreadLocal instance. */
7028 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7030 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: call the managed helper instead. */
7032 MonoMethod *thread_method;
7033 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7034 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7036 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7037 addr->klass = mono_class_from_mono_type (tls_field->type);
7038 addr->type = STACK_MP;
/* Cache the computed address in a temp for subsequent accesses. */
7039 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7040 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7042 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
7049 * Handle calls made to ctors from NEWOBJ opcodes.
7051 * REF_BBLOCK will point to the current bblock after the call.
/*
 * handle_ctor_call:
 * Emit the constructor invocation part of a NEWOBJ opcode: picks between an
 * intrinsic, inlining, a gsharedvt indirect call, a generic-sharing indirect
 * call, or a plain (possibly rgctx-carrying) direct call. SP holds the
 * already-pushed arguments; *REF_BBLOCK / *INLINE_COSTS are updated when
 * inlining succeeds.
 */
7054 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7055 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7057 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7058 MonoBasicBlock *bblock = *ref_bblock;
/* Shared valuetype ctors need an extra rgctx/vtable argument. */
7060 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7061 mono_method_is_generic_sharable (cmethod, TRUE)) {
7062 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7063 mono_class_vtable (cfg->domain, cmethod->klass);
7064 CHECK_TYPELOAD (cmethod->klass);
7066 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7067 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7070 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7071 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared path: the vtable is known at compile time. */
7073 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7075 CHECK_TYPELOAD (cmethod->klass);
7076 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7081 /* Avoid virtual calls to ctors if possible */
7082 if (mono_class_is_marshalbyref (cmethod->klass))
7083 callvirt_this_arg = sp [0];
7085 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7086 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7087 CHECK_CFG_EXCEPTION;
/* Try inlining, but never for Exception subclasses. */
7088 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7089 mono_method_check_inlining (cfg, cmethod) &&
7090 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7093 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
7094 cfg->real_offset += 5;
7096 *inline_costs += costs - 5;
7097 *ref_bblock = bblock;
7099 INLINE_FAILURE ("inline failure");
7100 // FIXME-VT: Clean this up
7101 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7102 GSHAREDVT_FAILURE(*ip);
7103 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: go through the out trampoline indirectly. */
7105 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7108 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7109 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7110 } else if (context_used &&
7111 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7112 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7113 MonoInst *cmethod_addr;
7115 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7117 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7118 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7120 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: direct call, passing the vtable/rgctx argument if any. */
7122 INLINE_FAILURE ("ctor call");
7123 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7124 callvirt_this_arg, NULL, vtable_arg);
7131 * mono_method_to_ir:
7133 * Translate the .net IL into linear IR.
7136 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7137 MonoInst *return_var, MonoInst **inline_args,
7138 guint inline_offset, gboolean is_virtual_call)
7141 MonoInst *ins, **sp, **stack_start;
7142 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7143 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7144 MonoMethod *cmethod, *method_definition;
7145 MonoInst **arg_array;
7146 MonoMethodHeader *header;
7148 guint32 token, ins_flag;
7150 MonoClass *constrained_call = NULL;
7151 unsigned char *ip, *end, *target, *err_pos;
7152 MonoMethodSignature *sig;
7153 MonoGenericContext *generic_context = NULL;
7154 MonoGenericContainer *generic_container = NULL;
7155 MonoType **param_types;
7156 int i, n, start_new_bblock, dreg;
7157 int num_calls = 0, inline_costs = 0;
7158 int breakpoint_id = 0;
7160 MonoBoolean security, pinvoke;
7161 MonoSecurityManager* secman = NULL;
7162 MonoDeclSecurityActions actions;
7163 GSList *class_inits = NULL;
7164 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7166 gboolean init_locals, seq_points, skip_dead_blocks;
7167 gboolean sym_seq_points = FALSE;
7168 MonoInst *cached_tls_addr = NULL;
7169 MonoDebugMethodInfo *minfo;
7170 MonoBitSet *seq_point_locs = NULL;
7171 MonoBitSet *seq_point_set_locs = NULL;
7173 cfg->disable_inline = is_jit_optimizer_disabled (method);
7175 /* serialization and xdomain stuff may need access to private fields and methods */
7176 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7177 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7178 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7179 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7180 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7181 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7183 dont_verify |= mono_security_smcs_hack_enabled ();
7185 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7186 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7187 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7188 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7189 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7191 image = method->klass->image;
7192 header = mono_method_get_header (method);
7194 MonoLoaderError *error;
7196 if ((error = mono_loader_get_last_error ())) {
7197 mono_cfg_set_exception (cfg, error->exception_type);
7199 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7200 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7202 goto exception_exit;
7204 generic_container = mono_method_get_generic_container (method);
7205 sig = mono_method_signature (method);
7206 num_args = sig->hasthis + sig->param_count;
7207 ip = (unsigned char*)header->code;
7208 cfg->cil_start = ip;
7209 end = ip + header->code_size;
7210 cfg->stat_cil_code_size += header->code_size;
7212 seq_points = cfg->gen_seq_points && cfg->method == method;
7213 #ifdef PLATFORM_ANDROID
7214 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7217 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7218 /* We could hit a seq point before attaching to the JIT (#8338) */
7222 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7223 minfo = mono_debug_lookup_method (method);
7225 int i, n_il_offsets;
7229 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7230 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7231 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7232 sym_seq_points = TRUE;
7233 for (i = 0; i < n_il_offsets; ++i) {
7234 if (il_offsets [i] < header->code_size)
7235 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7237 g_free (il_offsets);
7238 g_free (line_numbers);
7239 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7240 /* Methods without line number info like auto-generated property accessors */
7241 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7242 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7243 sym_seq_points = TRUE;
7248 * Methods without init_locals set could cause asserts in various passes
7249 * (#497220). To work around this, we emit dummy initialization opcodes
7250 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7251 * on some platforms.
7253 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7254 init_locals = header->init_locals;
7258 method_definition = method;
7259 while (method_definition->is_inflated) {
7260 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7261 method_definition = imethod->declaring;
7264 /* SkipVerification is not allowed if core-clr is enabled */
7265 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7267 dont_verify_stloc = TRUE;
7270 if (sig->is_inflated)
7271 generic_context = mono_method_get_context (method);
7272 else if (generic_container)
7273 generic_context = &generic_container->context;
7274 cfg->generic_context = generic_context;
7276 if (!cfg->generic_sharing_context)
7277 g_assert (!sig->has_type_parameters);
7279 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7280 g_assert (method->is_inflated);
7281 g_assert (mono_method_get_context (method)->method_inst);
7283 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7284 g_assert (sig->generic_param_count);
7286 if (cfg->method == method) {
7287 cfg->real_offset = 0;
7289 cfg->real_offset = inline_offset;
7292 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7293 cfg->cil_offset_to_bb_len = header->code_size;
7295 cfg->current_method = method;
7297 if (cfg->verbose_level > 2)
7298 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7300 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7302 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7303 for (n = 0; n < sig->param_count; ++n)
7304 param_types [n + sig->hasthis] = sig->params [n];
7305 cfg->arg_types = param_types;
7307 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7308 if (cfg->method == method) {
7310 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7311 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7314 NEW_BBLOCK (cfg, start_bblock);
7315 cfg->bb_entry = start_bblock;
7316 start_bblock->cil_code = NULL;
7317 start_bblock->cil_length = 0;
7318 #if defined(__native_client_codegen__)
7319 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7320 ins->dreg = alloc_dreg (cfg, STACK_I4);
7321 MONO_ADD_INS (start_bblock, ins);
7325 NEW_BBLOCK (cfg, end_bblock);
7326 cfg->bb_exit = end_bblock;
7327 end_bblock->cil_code = NULL;
7328 end_bblock->cil_length = 0;
7329 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7330 g_assert (cfg->num_bblocks == 2);
7332 arg_array = cfg->args;
7334 if (header->num_clauses) {
7335 cfg->spvars = g_hash_table_new (NULL, NULL);
7336 cfg->exvars = g_hash_table_new (NULL, NULL);
7338 /* handle exception clauses */
7339 for (i = 0; i < header->num_clauses; ++i) {
7340 MonoBasicBlock *try_bb;
7341 MonoExceptionClause *clause = &header->clauses [i];
7342 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7343 try_bb->real_offset = clause->try_offset;
7344 try_bb->try_start = TRUE;
7345 try_bb->region = ((i + 1) << 8) | clause->flags;
7346 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7347 tblock->real_offset = clause->handler_offset;
7348 tblock->flags |= BB_EXCEPTION_HANDLER;
7351 * Linking the try block with the EH block hinders inlining as we won't be able to
7352 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7354 if (COMPILE_LLVM (cfg))
7355 link_bblock (cfg, try_bb, tblock);
7357 if (*(ip + clause->handler_offset) == CEE_POP)
7358 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7360 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7361 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7362 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7363 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7364 MONO_ADD_INS (tblock, ins);
7366 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7367 /* finally clauses already have a seq point */
7368 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7369 MONO_ADD_INS (tblock, ins);
7372 /* todo: is a fault block unsafe to optimize? */
7373 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7374 tblock->flags |= BB_EXCEPTION_UNSAFE;
7378 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7380 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7382 /* catch and filter blocks get the exception object on the stack */
7383 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7384 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7385 MonoInst *dummy_use;
7387 /* mostly like handle_stack_args (), but just sets the input args */
7388 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7389 tblock->in_scount = 1;
7390 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7391 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7394 * Add a dummy use for the exvar so its liveness info will be
7398 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7400 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7401 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7402 tblock->flags |= BB_EXCEPTION_HANDLER;
7403 tblock->real_offset = clause->data.filter_offset;
7404 tblock->in_scount = 1;
7405 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7406 /* The filter block shares the exvar with the handler block */
7407 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7408 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7409 MONO_ADD_INS (tblock, ins);
7413 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7414 clause->data.catch_class &&
7415 cfg->generic_sharing_context &&
7416 mono_class_check_context_used (clause->data.catch_class)) {
7418 * In shared generic code with catch
7419 * clauses containing type variables
7420 * the exception handling code has to
7421 * be able to get to the rgctx.
7422 * Therefore we have to make sure that
7423 * the vtable/mrgctx argument (for
7424 * static or generic methods) or the
7425 * "this" argument (for non-static
7426 * methods) are live.
7428 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7429 mini_method_get_context (method)->method_inst ||
7430 method->klass->valuetype) {
7431 mono_get_vtable_var (cfg);
7433 MonoInst *dummy_use;
7435 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7440 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7441 cfg->cbb = start_bblock;
7442 cfg->args = arg_array;
7443 mono_save_args (cfg, sig, inline_args);
7446 /* FIRST CODE BLOCK */
7447 NEW_BBLOCK (cfg, bblock);
7448 bblock->cil_code = ip;
7452 ADD_BBLOCK (cfg, bblock);
7454 if (cfg->method == method) {
7455 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7456 if (breakpoint_id) {
7457 MONO_INST_NEW (cfg, ins, OP_BREAK);
7458 MONO_ADD_INS (bblock, ins);
7462 if (mono_security_cas_enabled ())
7463 secman = mono_security_manager_get_methods ();
7465 security = (secman && mono_security_method_has_declsec (method));
7466 /* at this point having security doesn't mean we have any code to generate */
7467 if (security && (cfg->method == method)) {
7468 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7469 * And we do not want to enter the next section (with allocation) if we
7470 * have nothing to generate */
7471 security = mono_declsec_get_demands (method, &actions);
7474 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7475 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7477 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7478 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7479 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7481 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
7482 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7486 mono_custom_attrs_free (custom);
7489 custom = mono_custom_attrs_from_class (wrapped->klass);
7490 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7494 mono_custom_attrs_free (custom);
7497 /* not a P/Invoke after all */
7502 /* we use a separate basic block for the initialization code */
7503 NEW_BBLOCK (cfg, init_localsbb);
7504 cfg->bb_init = init_localsbb;
7505 init_localsbb->real_offset = cfg->real_offset;
7506 start_bblock->next_bb = init_localsbb;
7507 init_localsbb->next_bb = bblock;
7508 link_bblock (cfg, start_bblock, init_localsbb);
7509 link_bblock (cfg, init_localsbb, bblock);
7511 cfg->cbb = init_localsbb;
7513 if (cfg->gsharedvt && cfg->method == method) {
7514 MonoGSharedVtMethodInfo *info;
7515 MonoInst *var, *locals_var;
7518 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7519 info->method = cfg->method;
7520 info->count_entries = 16;
7521 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7522 cfg->gsharedvt_info = info;
7524 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7525 /* prevent it from being register allocated */
7526 //var->flags |= MONO_INST_VOLATILE;
7527 cfg->gsharedvt_info_var = var;
7529 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7530 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7532 /* Allocate locals */
7533 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7534 /* prevent it from being register allocated */
7535 //locals_var->flags |= MONO_INST_VOLATILE;
7536 cfg->gsharedvt_locals_var = locals_var;
7538 dreg = alloc_ireg (cfg);
7539 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7541 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7542 ins->dreg = locals_var->dreg;
7544 MONO_ADD_INS (cfg->cbb, ins);
7545 cfg->gsharedvt_locals_var_ins = ins;
7547 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7550 ins->flags |= MONO_INST_INIT;
7554 /* at this point we know, if security is TRUE, that some code needs to be generated */
7555 if (security && (cfg->method == method)) {
7558 cfg->stat_cas_demand_generation++;
7560 if (actions.demand.blob) {
7561 /* Add code for SecurityAction.Demand */
7562 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7563 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7564 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7565 mono_emit_method_call (cfg, secman->demand, args, NULL);
7567 if (actions.noncasdemand.blob) {
7568 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7569 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7570 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7571 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7572 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7573 mono_emit_method_call (cfg, secman->demand, args, NULL);
7575 if (actions.demandchoice.blob) {
7576 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7577 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7578 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7579 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7580 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7584 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7586 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7589 if (mono_security_core_clr_enabled ()) {
7590 /* check if this is native code, e.g. an icall or a p/invoke */
7591 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7592 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7594 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7595 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7597 /* if this is a native call then it can only be JITted from platform code */
7598 if ((icall || pinvk) && method->klass && method->klass->image) {
7599 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7600 MonoException *ex = icall ? mono_get_exception_security () :
7601 mono_get_exception_method_access ();
7602 emit_throw_exception (cfg, ex);
7609 CHECK_CFG_EXCEPTION;
7611 if (header->code_size == 0)
7614 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7619 if (cfg->method == method)
7620 mono_debug_init_method (cfg, bblock, breakpoint_id);
7622 for (n = 0; n < header->num_locals; ++n) {
7623 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7628 /* We force the vtable variable here for all shared methods
7629 for the possibility that they might show up in a stack
7630 trace where their exact instantiation is needed. */
7631 if (cfg->generic_sharing_context && method == cfg->method) {
7632 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7633 mini_method_get_context (method)->method_inst ||
7634 method->klass->valuetype) {
7635 mono_get_vtable_var (cfg);
7637 /* FIXME: Is there a better way to do this?
7638 We need the variable live for the duration
7639 of the whole method. */
7640 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7644 /* add a check for this != NULL to inlined methods */
7645 if (is_virtual_call) {
7648 NEW_ARGLOAD (cfg, arg_ins, 0);
7649 MONO_ADD_INS (cfg->cbb, arg_ins);
7650 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7653 skip_dead_blocks = !dont_verify;
7654 if (skip_dead_blocks) {
7655 original_bb = bb = mono_basic_block_split (method, &cfg->error);
7660 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7661 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7664 start_new_bblock = 0;
7667 if (cfg->method == method)
7668 cfg->real_offset = ip - header->code;
7670 cfg->real_offset = inline_offset;
7675 if (start_new_bblock) {
7676 bblock->cil_length = ip - bblock->cil_code;
7677 if (start_new_bblock == 2) {
7678 g_assert (ip == tblock->cil_code);
7680 GET_BBLOCK (cfg, tblock, ip);
7682 bblock->next_bb = tblock;
7685 start_new_bblock = 0;
7686 for (i = 0; i < bblock->in_scount; ++i) {
7687 if (cfg->verbose_level > 3)
7688 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7689 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7693 g_slist_free (class_inits);
7696 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7697 link_bblock (cfg, bblock, tblock);
7698 if (sp != stack_start) {
7699 handle_stack_args (cfg, stack_start, sp - stack_start);
7701 CHECK_UNVERIFIABLE (cfg);
7703 bblock->next_bb = tblock;
7706 for (i = 0; i < bblock->in_scount; ++i) {
7707 if (cfg->verbose_level > 3)
7708 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7709 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7712 g_slist_free (class_inits);
7717 if (skip_dead_blocks) {
7718 int ip_offset = ip - header->code;
7720 if (ip_offset == bb->end)
7724 int op_size = mono_opcode_size (ip, end);
7725 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7727 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7729 if (ip_offset + op_size == bb->end) {
7730 MONO_INST_NEW (cfg, ins, OP_NOP);
7731 MONO_ADD_INS (bblock, ins);
7732 start_new_bblock = 1;
7740 * Sequence points are points where the debugger can place a breakpoint.
7741 * Currently, we generate these automatically at points where the IL
7744 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7746 * Make methods interruptible at the beginning, and at the targets of
7747 * backward branches.
7748 * Also, do this at the start of every bblock in methods with clauses too,
7749 * to be able to handle instructions with imprecise control flow like
7751 * Backward branches are handled at the end of method-to-ir ().
7753 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7755 /* Avoid sequence points on empty IL like .volatile */
7756 // FIXME: Enable this
7757 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7758 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7759 if (sp != stack_start)
7760 ins->flags |= MONO_INST_NONEMPTY_STACK;
7761 MONO_ADD_INS (cfg->cbb, ins);
7764 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7767 bblock->real_offset = cfg->real_offset;
7769 if ((cfg->method == method) && cfg->coverage_info) {
7770 guint32 cil_offset = ip - header->code;
7771 cfg->coverage_info->data [cil_offset].cil_code = ip;
7773 /* TODO: Use an increment here */
7774 #if defined(TARGET_X86)
7775 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7776 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7778 MONO_ADD_INS (cfg->cbb, ins);
7780 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7781 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7785 if (cfg->verbose_level > 3)
7786 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7790 if (seq_points && !sym_seq_points && sp != stack_start) {
7792 * The C# compiler uses these nops to notify the JIT that it should
7793 * insert seq points.
7795 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7796 MONO_ADD_INS (cfg->cbb, ins);
7798 if (cfg->keep_cil_nops)
7799 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7801 MONO_INST_NEW (cfg, ins, OP_NOP);
7803 MONO_ADD_INS (bblock, ins);
7806 if (should_insert_brekpoint (cfg->method)) {
7807 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7809 MONO_INST_NEW (cfg, ins, OP_NOP);
7812 MONO_ADD_INS (bblock, ins);
7818 CHECK_STACK_OVF (1);
7819 n = (*ip)-CEE_LDARG_0;
7821 EMIT_NEW_ARGLOAD (cfg, ins, n);
7829 CHECK_STACK_OVF (1);
7830 n = (*ip)-CEE_LDLOC_0;
7832 EMIT_NEW_LOCLOAD (cfg, ins, n);
7841 n = (*ip)-CEE_STLOC_0;
7844 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7846 emit_stloc_ir (cfg, sp, header, n);
7853 CHECK_STACK_OVF (1);
7856 EMIT_NEW_ARGLOAD (cfg, ins, n);
7862 CHECK_STACK_OVF (1);
7865 NEW_ARGLOADA (cfg, ins, n);
7866 MONO_ADD_INS (cfg->cbb, ins);
7876 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7878 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7883 CHECK_STACK_OVF (1);
7886 EMIT_NEW_LOCLOAD (cfg, ins, n);
7890 case CEE_LDLOCA_S: {
7891 unsigned char *tmp_ip;
7893 CHECK_STACK_OVF (1);
7894 CHECK_LOCAL (ip [1]);
7896 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7902 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7911 CHECK_LOCAL (ip [1]);
7912 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7914 emit_stloc_ir (cfg, sp, header, ip [1]);
7919 CHECK_STACK_OVF (1);
7920 EMIT_NEW_PCONST (cfg, ins, NULL);
7921 ins->type = STACK_OBJ;
7926 CHECK_STACK_OVF (1);
7927 EMIT_NEW_ICONST (cfg, ins, -1);
7940 CHECK_STACK_OVF (1);
7941 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7947 CHECK_STACK_OVF (1);
7949 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7955 CHECK_STACK_OVF (1);
7956 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7962 CHECK_STACK_OVF (1);
7963 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7964 ins->type = STACK_I8;
7965 ins->dreg = alloc_dreg (cfg, STACK_I8);
7967 ins->inst_l = (gint64)read64 (ip);
7968 MONO_ADD_INS (bblock, ins);
7974 gboolean use_aotconst = FALSE;
7976 #ifdef TARGET_POWERPC
7977 /* FIXME: Clean this up */
7978 if (cfg->compile_aot)
7979 use_aotconst = TRUE;
7982 /* FIXME: we should really allocate this only late in the compilation process */
7983 f = mono_domain_alloc (cfg->domain, sizeof (float));
7985 CHECK_STACK_OVF (1);
7991 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7993 dreg = alloc_freg (cfg);
7994 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7995 ins->type = STACK_R8;
7997 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7998 ins->type = STACK_R8;
7999 ins->dreg = alloc_dreg (cfg, STACK_R8);
8001 MONO_ADD_INS (bblock, ins);
8011 gboolean use_aotconst = FALSE;
8013 #ifdef TARGET_POWERPC
8014 /* FIXME: Clean this up */
8015 if (cfg->compile_aot)
8016 use_aotconst = TRUE;
8019 /* FIXME: we should really allocate this only late in the compilation process */
8020 d = mono_domain_alloc (cfg->domain, sizeof (double));
8022 CHECK_STACK_OVF (1);
8028 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8030 dreg = alloc_freg (cfg);
8031 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8032 ins->type = STACK_R8;
8034 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8035 ins->type = STACK_R8;
8036 ins->dreg = alloc_dreg (cfg, STACK_R8);
8038 MONO_ADD_INS (bblock, ins);
8047 MonoInst *temp, *store;
8049 CHECK_STACK_OVF (1);
8053 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8054 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8056 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8059 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8072 if (sp [0]->type == STACK_R8)
8073 /* we need to pop the value from the x86 FP stack */
8074 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8080 INLINE_FAILURE ("jmp");
8081 GSHAREDVT_FAILURE (*ip);
8084 if (stack_start != sp)
8086 token = read32 (ip + 1);
8087 /* FIXME: check the signature matches */
8088 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8090 if (!cmethod || mono_loader_get_last_error ())
8093 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8094 GENERIC_SHARING_FAILURE (CEE_JMP);
8096 if (mono_security_cas_enabled ())
8097 CHECK_CFG_EXCEPTION;
8099 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8101 if (ARCH_HAVE_OP_TAIL_CALL) {
8102 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8105 /* Handle tail calls similarly to calls */
8106 n = fsig->param_count + fsig->hasthis;
8110 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8111 call->method = cmethod;
8112 call->tail_call = TRUE;
8113 call->signature = mono_method_signature (cmethod);
8114 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8115 call->inst.inst_p0 = cmethod;
8116 for (i = 0; i < n; ++i)
8117 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8119 mono_arch_emit_call (cfg, call);
8120 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8121 MONO_ADD_INS (bblock, (MonoInst*)call);
8123 for (i = 0; i < num_args; ++i)
8124 /* Prevent arguments from being optimized away */
8125 arg_array [i]->flags |= MONO_INST_VOLATILE;
8127 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8128 ins = (MonoInst*)call;
8129 ins->inst_p0 = cmethod;
8130 MONO_ADD_INS (bblock, ins);
8134 start_new_bblock = 1;
8139 case CEE_CALLVIRT: {
8140 MonoInst *addr = NULL;
8141 MonoMethodSignature *fsig = NULL;
8143 int virtual = *ip == CEE_CALLVIRT;
8144 int calli = *ip == CEE_CALLI;
8145 gboolean pass_imt_from_rgctx = FALSE;
8146 MonoInst *imt_arg = NULL;
8147 MonoInst *keep_this_alive = NULL;
8148 gboolean pass_vtable = FALSE;
8149 gboolean pass_mrgctx = FALSE;
8150 MonoInst *vtable_arg = NULL;
8151 gboolean check_this = FALSE;
8152 gboolean supported_tail_call = FALSE;
8153 gboolean tail_call = FALSE;
8154 gboolean need_seq_point = FALSE;
8155 guint32 call_opcode = *ip;
8156 gboolean emit_widen = TRUE;
8157 gboolean push_res = TRUE;
8158 gboolean skip_ret = FALSE;
8159 gboolean delegate_invoke = FALSE;
8162 token = read32 (ip + 1);
8167 //GSHAREDVT_FAILURE (*ip);
8172 fsig = mini_get_signature (method, token, generic_context);
8173 n = fsig->param_count + fsig->hasthis;
8175 if (method->dynamic && fsig->pinvoke) {
8179 * This is a call through a function pointer using a pinvoke
8180 * signature. Have to create a wrapper and call that instead.
8181 * FIXME: This is very slow, need to create a wrapper at JIT time
8182 * instead based on the signature.
8184 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8185 EMIT_NEW_PCONST (cfg, args [1], fsig);
8187 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8190 MonoMethod *cil_method;
8192 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8193 cil_method = cmethod;
8195 if (constrained_call) {
8196 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8197 if (cfg->verbose_level > 2)
8198 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8199 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8200 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8201 cfg->generic_sharing_context)) {
8202 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context, &cfg->error);
8206 if (cfg->verbose_level > 2)
8207 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8209 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8211 * This is needed since get_method_constrained can't find
8212 * the method in klass representing a type var.
8213 * The type var is guaranteed to be a reference type in this
8216 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8217 g_assert (!cmethod->klass->valuetype);
8219 cmethod = mono_get_method_constrained_checked (image, token, constrained_call, generic_context, &cil_method, &cfg->error);
8225 if (!cmethod || mono_loader_get_last_error ())
8227 if (!dont_verify && !cfg->skip_visibility) {
8228 MonoMethod *target_method = cil_method;
8229 if (method->is_inflated) {
8230 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8232 if (!mono_method_can_access_method (method_definition, target_method) &&
8233 !mono_method_can_access_method (method, cil_method))
8234 METHOD_ACCESS_FAILURE (method, cil_method);
8237 if (mono_security_core_clr_enabled ())
8238 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8240 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8241 /* MS.NET seems to silently convert this to a callvirt */
8246 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8247 * converts to a callvirt.
8249 * tests/bug-515884.il is an example of this behavior
8251 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8252 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8253 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8257 if (!cmethod->klass->inited)
8258 if (!mono_class_init (cmethod->klass))
8259 TYPE_LOAD_ERROR (cmethod->klass);
8261 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8262 mini_class_is_system_array (cmethod->klass)) {
8263 array_rank = cmethod->klass->rank;
8264 fsig = mono_method_signature (cmethod);
8266 fsig = mono_method_signature (cmethod);
8271 if (fsig->pinvoke) {
8272 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8273 check_for_pending_exc, cfg->compile_aot);
8274 fsig = mono_method_signature (wrapper);
8275 } else if (constrained_call) {
8276 fsig = mono_method_signature (cmethod);
8278 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8283 mono_save_token_info (cfg, image, token, cil_method);
8285 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8286 need_seq_point = TRUE;
8288 n = fsig->param_count + fsig->hasthis;
8290 /* Don't support calls made using type arguments for now */
8292 if (cfg->gsharedvt) {
8293 if (mini_is_gsharedvt_signature (cfg, fsig))
8294 GSHAREDVT_FAILURE (*ip);
8298 if (mono_security_cas_enabled ()) {
8299 if (check_linkdemand (cfg, method, cmethod))
8300 INLINE_FAILURE ("linkdemand");
8301 CHECK_CFG_EXCEPTION;
8304 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8305 g_assert_not_reached ();
8308 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8311 if (!cfg->generic_sharing_context && cmethod)
8312 g_assert (!mono_method_check_context_used (cmethod));
8316 //g_assert (!virtual || fsig->hasthis);
8320 if (constrained_call) {
8321 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8323 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
8325 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8326 /* The 'Own method' case below */
8327 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8328 /* 'The type parameter is instantiated as a reference type' case below. */
8329 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8330 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8331 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8332 MonoInst *args [16];
8335 * This case handles calls to
8336 * - object:ToString()/Equals()/GetHashCode(),
8337 * - System.IComparable<T>:CompareTo()
8338 * - System.IEquatable<T>:Equals ()
8339 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8343 if (mono_method_check_context_used (cmethod))
8344 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8346 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8347 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8349 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8350 if (fsig->hasthis && fsig->param_count) {
8351 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8352 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8353 ins->dreg = alloc_preg (cfg);
8354 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8355 MONO_ADD_INS (cfg->cbb, ins);
8358 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8361 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8363 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8364 addr_reg = ins->dreg;
8365 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8367 EMIT_NEW_ICONST (cfg, args [3], 0);
8368 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8371 EMIT_NEW_ICONST (cfg, args [3], 0);
8372 EMIT_NEW_ICONST (cfg, args [4], 0);
8374 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8377 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8378 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8379 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
8383 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8384 MONO_ADD_INS (cfg->cbb, add);
8386 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8387 MONO_ADD_INS (cfg->cbb, ins);
8388 /* ins represents the call result */
8393 GSHAREDVT_FAILURE (*ip);
8397 * We have the `constrained.' prefix opcode.
8399 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8401 * The type parameter is instantiated as a valuetype,
8402 * but that type doesn't override the method we're
8403 * calling, so we need to box `this'.
8405 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8406 ins->klass = constrained_call;
8407 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8408 CHECK_CFG_EXCEPTION;
8409 } else if (!constrained_call->valuetype) {
8410 int dreg = alloc_ireg_ref (cfg);
8413 * The type parameter is instantiated as a reference
8414 * type. We have a managed pointer on the stack, so
8415 * we need to dereference it here.
8417 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8418 ins->type = STACK_OBJ;
8421 if (cmethod->klass->valuetype) {
8424 /* Interface method */
8427 mono_class_setup_vtable (constrained_call);
8428 CHECK_TYPELOAD (constrained_call);
8429 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8431 TYPE_LOAD_ERROR (constrained_call);
8432 slot = mono_method_get_vtable_slot (cmethod);
8434 TYPE_LOAD_ERROR (cmethod->klass);
8435 cmethod = constrained_call->vtable [ioffset + slot];
8437 if (cmethod->klass == mono_defaults.enum_class) {
8438 /* Enum implements some interfaces, so treat this as the first case */
8439 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8440 ins->klass = constrained_call;
8441 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8442 CHECK_CFG_EXCEPTION;
8447 constrained_call = NULL;
8450 if (!calli && check_call_signature (cfg, fsig, sp))
8453 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8454 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8455 delegate_invoke = TRUE;
8458 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8460 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8461 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8469 * If the callee is a shared method, then its static cctor
8470 * might not get called after the call was patched.
8472 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8473 emit_generic_class_init (cfg, cmethod->klass);
8474 CHECK_TYPELOAD (cmethod->klass);
8478 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8480 if (cfg->generic_sharing_context && cmethod) {
8481 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8483 context_used = mini_method_check_context_used (cfg, cmethod);
8485 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8486 /* Generic method interface
8487 calls are resolved via a
8488 helper function and don't
8490 if (!cmethod_context || !cmethod_context->method_inst)
8491 pass_imt_from_rgctx = TRUE;
8495 * If a shared method calls another
8496 * shared method then the caller must
8497 * have a generic sharing context
8498 * because the magic trampoline
8499 * requires it. FIXME: We shouldn't
8500 * have to force the vtable/mrgctx
8501 * variable here. Instead there
8502 * should be a flag in the cfg to
8503 * request a generic sharing context.
8506 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8507 mono_get_vtable_var (cfg);
8512 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8514 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8516 CHECK_TYPELOAD (cmethod->klass);
8517 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8522 g_assert (!vtable_arg);
8524 if (!cfg->compile_aot) {
8526 * emit_get_rgctx_method () calls mono_class_vtable () so check
8527 * for type load errors before.
8529 mono_class_setup_vtable (cmethod->klass);
8530 CHECK_TYPELOAD (cmethod->klass);
8533 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8535 /* !marshalbyref is needed to properly handle generic methods + remoting */
8536 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8537 MONO_METHOD_IS_FINAL (cmethod)) &&
8538 !mono_class_is_marshalbyref (cmethod->klass)) {
8545 if (pass_imt_from_rgctx) {
8546 g_assert (!pass_vtable);
8549 imt_arg = emit_get_rgctx_method (cfg, context_used,
8550 cmethod, MONO_RGCTX_INFO_METHOD);
8554 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8556 /* Calling virtual generic methods */
8557 if (cmethod && virtual &&
8558 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8559 !(MONO_METHOD_IS_FINAL (cmethod) &&
8560 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8561 fsig->generic_param_count &&
8562 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8563 MonoInst *this_temp, *this_arg_temp, *store;
8564 MonoInst *iargs [4];
8565 gboolean use_imt = FALSE;
8567 g_assert (fsig->is_inflated);
8569 /* Prevent inlining of methods that contain indirect calls */
8570 INLINE_FAILURE ("virtual generic call");
8572 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8573 GSHAREDVT_FAILURE (*ip);
8575 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8576 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8581 g_assert (!imt_arg);
8583 g_assert (cmethod->is_inflated);
8584 imt_arg = emit_get_rgctx_method (cfg, context_used,
8585 cmethod, MONO_RGCTX_INFO_METHOD);
8586 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8588 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8589 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8590 MONO_ADD_INS (bblock, store);
8592 /* FIXME: This should be a managed pointer */
8593 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8595 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8596 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8597 cmethod, MONO_RGCTX_INFO_METHOD);
8598 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8599 addr = mono_emit_jit_icall (cfg,
8600 mono_helper_compile_generic_method, iargs);
8602 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8604 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8611 * Implement a workaround for the inherent races involved in locking:
8617 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8618 * try block, the Exit () won't be executed, see:
8619 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8620 * To work around this, we extend such try blocks to include the last x bytes
8621 * of the Monitor.Enter () call.
8623 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8624 MonoBasicBlock *tbb;
8626 GET_BBLOCK (cfg, tbb, ip + 5);
8628 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8629 * from Monitor.Enter like ArgumentNullException.
8631 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8632 /* Mark this bblock as needing to be extended */
8633 tbb->extend_try_block = TRUE;
8637 /* Conversion to a JIT intrinsic */
8638 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8640 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8641 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8648 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8649 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8650 mono_method_check_inlining (cfg, cmethod)) {
8652 gboolean always = FALSE;
8654 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8655 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8656 /* Prevent inlining of methods that call wrappers */
8657 INLINE_FAILURE ("wrapper call");
8658 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8662 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
8664 cfg->real_offset += 5;
8666 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8667 /* *sp is already set by inline_method */
8672 inline_costs += costs;
8678 /* Tail recursion elimination */
8679 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8680 gboolean has_vtargs = FALSE;
8683 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8684 INLINE_FAILURE ("tail call");
8686 /* keep it simple */
8687 for (i = fsig->param_count - 1; i >= 0; i--) {
8688 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8693 for (i = 0; i < n; ++i)
8694 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8695 MONO_INST_NEW (cfg, ins, OP_BR);
8696 MONO_ADD_INS (bblock, ins);
8697 tblock = start_bblock->out_bb [0];
8698 link_bblock (cfg, bblock, tblock);
8699 ins->inst_target_bb = tblock;
8700 start_new_bblock = 1;
8702 /* skip the CEE_RET, too */
8703 if (ip_in_bb (cfg, bblock, ip + 5))
8710 inline_costs += 10 * num_calls++;
8713 * Making generic calls out of gsharedvt methods.
8714 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8715 * patching gshared method addresses into a gsharedvt method.
8717 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class)) {
8718 MonoRgctxInfoType info_type;
8721 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8722 //GSHAREDVT_FAILURE (*ip);
8723 // disable for possible remoting calls
8724 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8725 GSHAREDVT_FAILURE (*ip);
8726 if (fsig->generic_param_count) {
8727 /* virtual generic call */
8728 g_assert (mono_use_imt);
8729 g_assert (!imt_arg);
8730 /* Same as the virtual generic case above */
8731 imt_arg = emit_get_rgctx_method (cfg, context_used,
8732 cmethod, MONO_RGCTX_INFO_METHOD);
8733 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8735 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
8736 /* This can happen when we call a fully instantiated iface method */
8737 imt_arg = emit_get_rgctx_method (cfg, context_used,
8738 cmethod, MONO_RGCTX_INFO_METHOD);
8743 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8744 /* test_0_multi_dim_arrays () in gshared.cs */
8745 GSHAREDVT_FAILURE (*ip);
8747 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8748 keep_this_alive = sp [0];
8750 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8751 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8753 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8754 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8756 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8758 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8760 * We pass the address to the gsharedvt trampoline in the rgctx reg
8762 MonoInst *callee = addr;
8764 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8766 GSHAREDVT_FAILURE (*ip);
8768 addr = emit_get_rgctx_sig (cfg, context_used,
8769 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8770 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8774 /* Generic sharing */
8777 * Use this if the callee is gsharedvt sharable too, since
8778 * at runtime we might find an instantiation so the call cannot
8779 * be patched (the 'no_patch' code path in mini-trampolines.c).
8781 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8782 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8783 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8784 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8785 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8786 INLINE_FAILURE ("gshared");
8788 g_assert (cfg->generic_sharing_context && cmethod);
8792 * We are compiling a call to a
8793 * generic method from shared code,
8794 * which means that we have to look up
8795 * the method in the rgctx and do an
8799 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8801 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8802 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8806 /* Indirect calls */
8808 if (call_opcode == CEE_CALL)
8809 g_assert (context_used);
8810 else if (call_opcode == CEE_CALLI)
8811 g_assert (!vtable_arg);
8813 /* FIXME: what the hell is this??? */
8814 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8815 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8817 /* Prevent inlining of methods with indirect calls */
8818 INLINE_FAILURE ("indirect call");
8820 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8825 * Instead of emitting an indirect call, emit a direct call
8826 * with the contents of the aotconst as the patch info.
8828 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8829 info_type = addr->inst_c1;
8830 info_data = addr->inst_p0;
8832 info_type = addr->inst_right->inst_c1;
8833 info_data = addr->inst_right->inst_left;
8836 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8837 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8842 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8850 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8851 MonoInst *val = sp [fsig->param_count];
8853 if (val->type == STACK_OBJ) {
8854 MonoInst *iargs [2];
8859 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8862 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8863 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8864 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8865 emit_write_barrier (cfg, addr, val);
8866 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8867 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8869 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8870 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8871 if (!cmethod->klass->element_class->valuetype && !readonly)
8872 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8873 CHECK_TYPELOAD (cmethod->klass);
8876 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8879 g_assert_not_reached ();
8886 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8890 /* Tail prefix / tail call optimization */
8892 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8893 /* FIXME: runtime generic context pointer for jumps? */
8894 /* FIXME: handle this for generic sharing eventually */
8895 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8896 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8897 supported_tail_call = TRUE;
8899 if (supported_tail_call) {
8902 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8903 INLINE_FAILURE ("tail call");
8905 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8907 if (ARCH_HAVE_OP_TAIL_CALL) {
8908 /* Handle tail calls similarly to normal calls */
8911 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8913 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8914 call->tail_call = TRUE;
8915 call->method = cmethod;
8916 call->signature = mono_method_signature (cmethod);
8919 * We implement tail calls by storing the actual arguments into the
8920 * argument variables, then emitting a CEE_JMP.
8922 for (i = 0; i < n; ++i) {
8923 /* Prevent argument from being register allocated */
8924 arg_array [i]->flags |= MONO_INST_VOLATILE;
8925 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8927 ins = (MonoInst*)call;
8928 ins->inst_p0 = cmethod;
8929 ins->inst_p1 = arg_array [0];
8930 MONO_ADD_INS (bblock, ins);
8931 link_bblock (cfg, bblock, end_bblock);
8932 start_new_bblock = 1;
8934 // FIXME: Eliminate unreachable epilogs
8937 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8938 * only reachable from this call.
8940 GET_BBLOCK (cfg, tblock, ip + 5);
8941 if (tblock == bblock || tblock->in_count == 0)
8950 * Synchronized wrappers.
8951 * It's hard to determine where to replace a method with its synchronized
8952 * wrapper without causing an infinite recursion. The current solution is
8953 * to add the synchronized wrapper in the trampolines, and to
8954 * change the called method to a dummy wrapper, and resolve that wrapper
8955 * to the real method in mono_jit_compile_method ().
8957 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8958 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8959 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8960 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8964 INLINE_FAILURE ("call");
8965 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8966 imt_arg, vtable_arg);
8969 link_bblock (cfg, bblock, end_bblock);
8970 start_new_bblock = 1;
8972 // FIXME: Eliminate unreachable epilogs
8975 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8976 * only reachable from this call.
8978 GET_BBLOCK (cfg, tblock, ip + 5);
8979 if (tblock == bblock || tblock->in_count == 0)
8986 /* End of call, INS should contain the result of the call, if any */
8988 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8991 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8996 if (keep_this_alive) {
8997 MonoInst *dummy_use;
8999 /* See mono_emit_method_call_full () */
9000 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9003 CHECK_CFG_EXCEPTION;
9007 g_assert (*ip == CEE_RET);
9011 constrained_call = NULL;
9013 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9017 if (cfg->method != method) {
9018 /* return from inlined method */
9020 * If in_count == 0, that means the ret is unreachable due to
9021 * being preceded by a throw. In that case, inline_method () will
9022 * handle setting the return value
9023 * (test case: test_0_inline_throw ()).
9025 if (return_var && cfg->cbb->in_count) {
9026 MonoType *ret_type = mono_method_signature (method)->ret;
9032 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9035 //g_assert (returnvar != -1);
9036 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9037 cfg->ret_var_set = TRUE;
9040 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9042 if (cfg->lmf_var && cfg->cbb->in_count)
9046 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
9048 if (seq_points && !sym_seq_points) {
9050 * Place a seq point here too even though the IL stack is not
9051 * empty, so a step over on
9054 * will work correctly.
9056 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9057 MONO_ADD_INS (cfg->cbb, ins);
9060 g_assert (!return_var);
9064 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9067 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9070 if (!cfg->vret_addr) {
9073 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9075 EMIT_NEW_RETLOADA (cfg, ret_addr);
9077 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9078 ins->klass = mono_class_from_mono_type (ret_type);
9081 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9082 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9083 MonoInst *iargs [1];
9087 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9088 mono_arch_emit_setret (cfg, method, conv);
9090 mono_arch_emit_setret (cfg, method, *sp);
9093 mono_arch_emit_setret (cfg, method, *sp);
9098 if (sp != stack_start)
9100 MONO_INST_NEW (cfg, ins, OP_BR);
9102 ins->inst_target_bb = end_bblock;
9103 MONO_ADD_INS (bblock, ins);
9104 link_bblock (cfg, bblock, end_bblock);
9105 start_new_bblock = 1;
9109 MONO_INST_NEW (cfg, ins, OP_BR);
9111 target = ip + 1 + (signed char)(*ip);
9113 GET_BBLOCK (cfg, tblock, target);
9114 link_bblock (cfg, bblock, tblock);
9115 ins->inst_target_bb = tblock;
9116 if (sp != stack_start) {
9117 handle_stack_args (cfg, stack_start, sp - stack_start);
9119 CHECK_UNVERIFIABLE (cfg);
9121 MONO_ADD_INS (bblock, ins);
9122 start_new_bblock = 1;
9123 inline_costs += BRANCH_COST;
9137 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9139 target = ip + 1 + *(signed char*)ip;
9145 inline_costs += BRANCH_COST;
9149 MONO_INST_NEW (cfg, ins, OP_BR);
9152 target = ip + 4 + (gint32)read32(ip);
9154 GET_BBLOCK (cfg, tblock, target);
9155 link_bblock (cfg, bblock, tblock);
9156 ins->inst_target_bb = tblock;
9157 if (sp != stack_start) {
9158 handle_stack_args (cfg, stack_start, sp - stack_start);
9160 CHECK_UNVERIFIABLE (cfg);
9163 MONO_ADD_INS (bblock, ins);
9165 start_new_bblock = 1;
9166 inline_costs += BRANCH_COST;
9173 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9174 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9175 guint32 opsize = is_short ? 1 : 4;
9177 CHECK_OPSIZE (opsize);
9179 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9182 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9187 GET_BBLOCK (cfg, tblock, target);
9188 link_bblock (cfg, bblock, tblock);
9189 GET_BBLOCK (cfg, tblock, ip);
9190 link_bblock (cfg, bblock, tblock);
9192 if (sp != stack_start) {
9193 handle_stack_args (cfg, stack_start, sp - stack_start);
9194 CHECK_UNVERIFIABLE (cfg);
9197 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9198 cmp->sreg1 = sp [0]->dreg;
9199 type_from_op (cmp, sp [0], NULL);
9202 #if SIZEOF_REGISTER == 4
9203 if (cmp->opcode == OP_LCOMPARE_IMM) {
9204 /* Convert it to OP_LCOMPARE */
9205 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9206 ins->type = STACK_I8;
9207 ins->dreg = alloc_dreg (cfg, STACK_I8);
9209 MONO_ADD_INS (bblock, ins);
9210 cmp->opcode = OP_LCOMPARE;
9211 cmp->sreg2 = ins->dreg;
9214 MONO_ADD_INS (bblock, cmp);
9216 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9217 type_from_op (ins, sp [0], NULL);
9218 MONO_ADD_INS (bblock, ins);
9219 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9220 GET_BBLOCK (cfg, tblock, target);
9221 ins->inst_true_bb = tblock;
9222 GET_BBLOCK (cfg, tblock, ip);
9223 ins->inst_false_bb = tblock;
9224 start_new_bblock = 2;
9227 inline_costs += BRANCH_COST;
9242 MONO_INST_NEW (cfg, ins, *ip);
9244 target = ip + 4 + (gint32)read32(ip);
9250 inline_costs += BRANCH_COST;
9254 MonoBasicBlock **targets;
9255 MonoBasicBlock *default_bblock;
9256 MonoJumpInfoBBTable *table;
9257 int offset_reg = alloc_preg (cfg);
9258 int target_reg = alloc_preg (cfg);
9259 int table_reg = alloc_preg (cfg);
9260 int sum_reg = alloc_preg (cfg);
9261 gboolean use_op_switch;
9265 n = read32 (ip + 1);
9268 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9272 CHECK_OPSIZE (n * sizeof (guint32));
9273 target = ip + n * sizeof (guint32);
9275 GET_BBLOCK (cfg, default_bblock, target);
9276 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9278 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9279 for (i = 0; i < n; ++i) {
9280 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9281 targets [i] = tblock;
9282 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9286 if (sp != stack_start) {
9288 * Link the current bb with the targets as well, so handle_stack_args
9289 * will set their in_stack correctly.
9291 link_bblock (cfg, bblock, default_bblock);
9292 for (i = 0; i < n; ++i)
9293 link_bblock (cfg, bblock, targets [i]);
9295 handle_stack_args (cfg, stack_start, sp - stack_start);
9297 CHECK_UNVERIFIABLE (cfg);
9300 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9301 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9304 for (i = 0; i < n; ++i)
9305 link_bblock (cfg, bblock, targets [i]);
9307 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9308 table->table = targets;
9309 table->table_size = n;
9311 use_op_switch = FALSE;
9313 /* ARM implements SWITCH statements differently */
9314 /* FIXME: Make it use the generic implementation */
9315 if (!cfg->compile_aot)
9316 use_op_switch = TRUE;
9319 if (COMPILE_LLVM (cfg))
9320 use_op_switch = TRUE;
9322 cfg->cbb->has_jump_table = 1;
9324 if (use_op_switch) {
9325 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9326 ins->sreg1 = src1->dreg;
9327 ins->inst_p0 = table;
9328 ins->inst_many_bb = targets;
9329 ins->klass = GUINT_TO_POINTER (n);
9330 MONO_ADD_INS (cfg->cbb, ins);
9332 if (sizeof (gpointer) == 8)
9333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9335 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9337 #if SIZEOF_REGISTER == 8
9338 /* The upper word might not be zero, and we add it to a 64 bit address later */
9339 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9342 if (cfg->compile_aot) {
9343 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9345 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9346 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9347 ins->inst_p0 = table;
9348 ins->dreg = table_reg;
9349 MONO_ADD_INS (cfg->cbb, ins);
9352 /* FIXME: Use load_memindex */
9353 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9354 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9355 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9357 start_new_bblock = 1;
9358 inline_costs += (BRANCH_COST * 2);
9378 dreg = alloc_freg (cfg);
9381 dreg = alloc_lreg (cfg);
9384 dreg = alloc_ireg_ref (cfg);
9387 dreg = alloc_preg (cfg);
9390 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9391 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9392 ins->flags |= ins_flag;
9393 MONO_ADD_INS (bblock, ins);
9395 if (ins_flag & MONO_INST_VOLATILE) {
9396 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9397 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9413 if (ins_flag & MONO_INST_VOLATILE) {
9414 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9415 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9418 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9419 ins->flags |= ins_flag;
9422 MONO_ADD_INS (bblock, ins);
9424 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9425 emit_write_barrier (cfg, sp [0], sp [1]);
9434 MONO_INST_NEW (cfg, ins, (*ip));
9436 ins->sreg1 = sp [0]->dreg;
9437 ins->sreg2 = sp [1]->dreg;
9438 type_from_op (ins, sp [0], sp [1]);
9440 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9442 /* Use the immediate opcodes if possible */
9443 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9444 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9445 if (imm_opcode != -1) {
9446 ins->opcode = imm_opcode;
9447 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9450 NULLIFY_INS (sp [1]);
9454 MONO_ADD_INS ((cfg)->cbb, (ins));
9456 *sp++ = mono_decompose_opcode (cfg, ins);
9473 MONO_INST_NEW (cfg, ins, (*ip));
9475 ins->sreg1 = sp [0]->dreg;
9476 ins->sreg2 = sp [1]->dreg;
9477 type_from_op (ins, sp [0], sp [1]);
9479 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9480 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9482 /* FIXME: Pass opcode to is_inst_imm */
9484 /* Use the immediate opcodes if possible */
9485 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9488 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9489 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9490 /* Keep emulated opcodes which are optimized away later */
9491 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9492 imm_opcode = mono_op_to_op_imm (ins->opcode);
9495 if (imm_opcode != -1) {
9496 ins->opcode = imm_opcode;
9497 if (sp [1]->opcode == OP_I8CONST) {
9498 #if SIZEOF_REGISTER == 8
9499 ins->inst_imm = sp [1]->inst_l;
9501 ins->inst_ls_word = sp [1]->inst_ls_word;
9502 ins->inst_ms_word = sp [1]->inst_ms_word;
9506 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9509 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9510 if (sp [1]->next == NULL)
9511 NULLIFY_INS (sp [1]);
9514 MONO_ADD_INS ((cfg)->cbb, (ins));
9516 *sp++ = mono_decompose_opcode (cfg, ins);
9529 case CEE_CONV_OVF_I8:
9530 case CEE_CONV_OVF_U8:
9534 /* Special case this earlier so we have long constants in the IR */
9535 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9536 int data = sp [-1]->inst_c0;
9537 sp [-1]->opcode = OP_I8CONST;
9538 sp [-1]->type = STACK_I8;
9539 #if SIZEOF_REGISTER == 8
9540 if ((*ip) == CEE_CONV_U8)
9541 sp [-1]->inst_c0 = (guint32)data;
9543 sp [-1]->inst_c0 = data;
9545 sp [-1]->inst_ls_word = data;
9546 if ((*ip) == CEE_CONV_U8)
9547 sp [-1]->inst_ms_word = 0;
9549 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9551 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9558 case CEE_CONV_OVF_I4:
9559 case CEE_CONV_OVF_I1:
9560 case CEE_CONV_OVF_I2:
9561 case CEE_CONV_OVF_I:
9562 case CEE_CONV_OVF_U:
9565 if (sp [-1]->type == STACK_R8) {
9566 ADD_UNOP (CEE_CONV_OVF_I8);
9573 case CEE_CONV_OVF_U1:
9574 case CEE_CONV_OVF_U2:
9575 case CEE_CONV_OVF_U4:
9578 if (sp [-1]->type == STACK_R8) {
9579 ADD_UNOP (CEE_CONV_OVF_U8);
9586 case CEE_CONV_OVF_I1_UN:
9587 case CEE_CONV_OVF_I2_UN:
9588 case CEE_CONV_OVF_I4_UN:
9589 case CEE_CONV_OVF_I8_UN:
9590 case CEE_CONV_OVF_U1_UN:
9591 case CEE_CONV_OVF_U2_UN:
9592 case CEE_CONV_OVF_U4_UN:
9593 case CEE_CONV_OVF_U8_UN:
9594 case CEE_CONV_OVF_I_UN:
9595 case CEE_CONV_OVF_U_UN:
9602 CHECK_CFG_EXCEPTION;
9606 case CEE_ADD_OVF_UN:
9608 case CEE_MUL_OVF_UN:
9610 case CEE_SUB_OVF_UN:
9616 GSHAREDVT_FAILURE (*ip);
9619 token = read32 (ip + 1);
9620 klass = mini_get_class (method, token, generic_context);
9621 CHECK_TYPELOAD (klass);
9623 if (generic_class_is_reference_type (cfg, klass)) {
9624 MonoInst *store, *load;
9625 int dreg = alloc_ireg_ref (cfg);
9627 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9628 load->flags |= ins_flag;
9629 MONO_ADD_INS (cfg->cbb, load);
9631 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9632 store->flags |= ins_flag;
9633 MONO_ADD_INS (cfg->cbb, store);
9635 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9636 emit_write_barrier (cfg, sp [0], sp [1]);
9638 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9650 token = read32 (ip + 1);
9651 klass = mini_get_class (method, token, generic_context);
9652 CHECK_TYPELOAD (klass);
9654 /* Optimize the common ldobj+stloc combination */
9664 loc_index = ip [5] - CEE_STLOC_0;
9671 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9672 CHECK_LOCAL (loc_index);
9674 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9675 ins->dreg = cfg->locals [loc_index]->dreg;
9676 ins->flags |= ins_flag;
9679 if (ins_flag & MONO_INST_VOLATILE) {
9680 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9681 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9687 /* Optimize the ldobj+stobj combination */
9688 /* The reference case ends up being a load+store anyway */
9689 /* Skip this if the operation is volatile. */
9690 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9695 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9702 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9703 ins->flags |= ins_flag;
9706 if (ins_flag & MONO_INST_VOLATILE) {
9707 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9708 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9717 CHECK_STACK_OVF (1);
9719 n = read32 (ip + 1);
9721 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9722 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9723 ins->type = STACK_OBJ;
9726 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9727 MonoInst *iargs [1];
9728 char *str = mono_method_get_wrapper_data (method, n);
9730 if (cfg->compile_aot)
9731 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
9733 EMIT_NEW_PCONST (cfg, iargs [0], str);
9734 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9736 if (cfg->opt & MONO_OPT_SHARED) {
9737 MonoInst *iargs [3];
9739 if (cfg->compile_aot) {
9740 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9742 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9743 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9744 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9745 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9746 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9748 if (bblock->out_of_line) {
9749 MonoInst *iargs [2];
9751 if (image == mono_defaults.corlib) {
9753 * Avoid relocations in AOT and save some space by using a
9754 * version of helper_ldstr specialized to mscorlib.
9756 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9757 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9759 /* Avoid creating the string object */
9760 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9761 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9762 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9766 if (cfg->compile_aot) {
9767 NEW_LDSTRCONST (cfg, ins, image, n);
9769 MONO_ADD_INS (bblock, ins);
9772 NEW_PCONST (cfg, ins, NULL);
9773 ins->type = STACK_OBJ;
9774 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9776 OUT_OF_MEMORY_FAILURE;
9779 MONO_ADD_INS (bblock, ins);
9788 MonoInst *iargs [2];
9789 MonoMethodSignature *fsig;
9792 MonoInst *vtable_arg = NULL;
9795 token = read32 (ip + 1);
9796 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9797 if (!cmethod || mono_loader_get_last_error ())
9799 fsig = mono_method_get_signature_checked (cmethod, image, token, NULL, &cfg->error);
9802 mono_save_token_info (cfg, image, token, cmethod);
9804 if (!mono_class_init (cmethod->klass))
9805 TYPE_LOAD_ERROR (cmethod->klass);
9807 context_used = mini_method_check_context_used (cfg, cmethod);
9809 if (mono_security_cas_enabled ()) {
9810 if (check_linkdemand (cfg, method, cmethod))
9811 INLINE_FAILURE ("linkdemand");
9812 CHECK_CFG_EXCEPTION;
9813 } else if (mono_security_core_clr_enabled ()) {
9814 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9817 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9818 emit_generic_class_init (cfg, cmethod->klass);
9819 CHECK_TYPELOAD (cmethod->klass);
9823 if (cfg->gsharedvt) {
9824 if (mini_is_gsharedvt_variable_signature (sig))
9825 GSHAREDVT_FAILURE (*ip);
9829 n = fsig->param_count;
9833 * Generate smaller code for the common newobj <exception> instruction in
9834 * argument checking code.
9836 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9837 is_exception_class (cmethod->klass) && n <= 2 &&
9838 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9839 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9840 MonoInst *iargs [3];
9844 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9847 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9851 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9856 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9859 g_assert_not_reached ();
9867 /* move the args to allow room for 'this' in the first position */
9873 /* check_call_signature () requires sp[0] to be set */
9874 this_ins.type = STACK_OBJ;
9876 if (check_call_signature (cfg, fsig, sp))
9881 if (mini_class_is_system_array (cmethod->klass)) {
9882 *sp = emit_get_rgctx_method (cfg, context_used,
9883 cmethod, MONO_RGCTX_INFO_METHOD);
9885 /* Avoid varargs in the common case */
9886 if (fsig->param_count == 1)
9887 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9888 else if (fsig->param_count == 2)
9889 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9890 else if (fsig->param_count == 3)
9891 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9892 else if (fsig->param_count == 4)
9893 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9895 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9896 } else if (cmethod->string_ctor) {
9897 g_assert (!context_used);
9898 g_assert (!vtable_arg);
9899 /* we simply pass a null pointer */
9900 EMIT_NEW_PCONST (cfg, *sp, NULL);
9901 /* now call the string ctor */
9902 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9904 if (cmethod->klass->valuetype) {
9905 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9906 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9907 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9912 * The code generated by mini_emit_virtual_call () expects
9913 * iargs [0] to be a boxed instance, but luckily the vcall
9914 * will be transformed into a normal call there.
9916 } else if (context_used) {
9917 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9920 MonoVTable *vtable = NULL;
9922 if (!cfg->compile_aot)
9923 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9924 CHECK_TYPELOAD (cmethod->klass);
9927 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9928 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9929 * As a workaround, we call class cctors before allocating objects.
9931 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9932 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9933 if (cfg->verbose_level > 2)
9934 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9935 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9938 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9941 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9944 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9946 /* Now call the actual ctor */
9947 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
9948 CHECK_CFG_EXCEPTION;
9951 if (alloc == NULL) {
9953 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9954 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9962 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
9963 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9970 token = read32 (ip + 1);
9971 klass = mini_get_class (method, token, generic_context);
9972 CHECK_TYPELOAD (klass);
9973 if (sp [0]->type != STACK_OBJ)
9976 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
9977 CHECK_CFG_EXCEPTION;
9986 token = read32 (ip + 1);
9987 klass = mini_get_class (method, token, generic_context);
9988 CHECK_TYPELOAD (klass);
9989 if (sp [0]->type != STACK_OBJ)
9992 context_used = mini_class_check_context_used (cfg, klass);
9994 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9995 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10002 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10005 if (cfg->compile_aot)
10006 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
10008 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10010 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10013 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10014 MonoMethod *mono_isinst;
10015 MonoInst *iargs [1];
10018 mono_isinst = mono_marshal_get_isinst (klass);
10019 iargs [0] = sp [0];
10021 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10022 iargs, ip, cfg->real_offset, TRUE, &bblock);
10023 CHECK_CFG_EXCEPTION;
10024 g_assert (costs > 0);
10027 cfg->real_offset += 5;
10031 inline_costs += costs;
10034 ins = handle_isinst (cfg, klass, *sp, context_used);
10035 CHECK_CFG_EXCEPTION;
10042 case CEE_UNBOX_ANY: {
10043 MonoInst *res, *addr;
10048 token = read32 (ip + 1);
10049 klass = mini_get_class (method, token, generic_context);
10050 CHECK_TYPELOAD (klass);
10052 mono_save_token_info (cfg, image, token, klass);
10054 context_used = mini_class_check_context_used (cfg, klass);
10056 if (mini_is_gsharedvt_klass (cfg, klass)) {
10057 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10059 } else if (generic_class_is_reference_type (cfg, klass)) {
10060 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10061 CHECK_CFG_EXCEPTION;
10062 } else if (mono_class_is_nullable (klass)) {
10063 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10065 addr = handle_unbox (cfg, klass, sp, context_used);
10067 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10083 token = read32 (ip + 1);
10084 klass = mini_get_class (method, token, generic_context);
10085 CHECK_TYPELOAD (klass);
10087 mono_save_token_info (cfg, image, token, klass);
10089 context_used = mini_class_check_context_used (cfg, klass);
10091 if (generic_class_is_reference_type (cfg, klass)) {
10097 if (klass == mono_defaults.void_class)
10099 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10101 /* frequent check in generic code: box (struct), brtrue */
10103 // FIXME: LLVM can't handle the inconsistent bb linking
10104 if (!mono_class_is_nullable (klass) &&
10105 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10106 (ip [5] == CEE_BRTRUE ||
10107 ip [5] == CEE_BRTRUE_S ||
10108 ip [5] == CEE_BRFALSE ||
10109 ip [5] == CEE_BRFALSE_S)) {
10110 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10112 MonoBasicBlock *true_bb, *false_bb;
10116 if (cfg->verbose_level > 3) {
10117 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10118 printf ("<box+brtrue opt>\n");
10123 case CEE_BRFALSE_S:
10126 target = ip + 1 + (signed char)(*ip);
10133 target = ip + 4 + (gint)(read32 (ip));
10137 g_assert_not_reached ();
10141 * We need to link both bblocks, since it is needed for handling stack
10142 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10143 * Branching to only one of them would lead to inconsistencies, so
10144 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10146 GET_BBLOCK (cfg, true_bb, target);
10147 GET_BBLOCK (cfg, false_bb, ip);
10149 mono_link_bblock (cfg, cfg->cbb, true_bb);
10150 mono_link_bblock (cfg, cfg->cbb, false_bb);
10152 if (sp != stack_start) {
10153 handle_stack_args (cfg, stack_start, sp - stack_start);
10155 CHECK_UNVERIFIABLE (cfg);
10158 if (COMPILE_LLVM (cfg)) {
10159 dreg = alloc_ireg (cfg);
10160 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10161 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10163 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10165 /* The JIT can't eliminate the iconst+compare */
10166 MONO_INST_NEW (cfg, ins, OP_BR);
10167 ins->inst_target_bb = is_true ? true_bb : false_bb;
10168 MONO_ADD_INS (cfg->cbb, ins);
10171 start_new_bblock = 1;
10175 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10177 CHECK_CFG_EXCEPTION;
10186 token = read32 (ip + 1);
10187 klass = mini_get_class (method, token, generic_context);
10188 CHECK_TYPELOAD (klass);
10190 mono_save_token_info (cfg, image, token, klass);
10192 context_used = mini_class_check_context_used (cfg, klass);
10194 if (mono_class_is_nullable (klass)) {
10197 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10198 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10202 ins = handle_unbox (cfg, klass, sp, context_used);
10215 MonoClassField *field;
10216 #ifndef DISABLE_REMOTING
10220 gboolean is_instance;
10222 gpointer addr = NULL;
10223 gboolean is_special_static;
10225 MonoInst *store_val = NULL;
10226 MonoInst *thread_ins;
10229 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10231 if (op == CEE_STFLD) {
10234 store_val = sp [1];
10239 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10241 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10244 if (op == CEE_STSFLD) {
10247 store_val = sp [0];
10252 token = read32 (ip + 1);
10253 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10254 field = mono_method_get_wrapper_data (method, token);
10255 klass = field->parent;
10258 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10261 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10262 FIELD_ACCESS_FAILURE (method, field);
10263 mono_class_init (klass);
10265 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10268 /* if the class is Critical then transparent code cannot access its fields */
10269 if (!is_instance && mono_security_core_clr_enabled ())
10270 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10272 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10273 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10274 if (mono_security_core_clr_enabled ())
10275 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10279 * LDFLD etc. is usable on static fields as well, so convert those cases to
10282 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10294 g_assert_not_reached ();
10296 is_instance = FALSE;
10299 context_used = mini_class_check_context_used (cfg, klass);
10301 /* INSTANCE CASE */
10303 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10304 if (op == CEE_STFLD) {
10305 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10307 #ifndef DISABLE_REMOTING
10308 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10309 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10310 MonoInst *iargs [5];
10312 GSHAREDVT_FAILURE (op);
10314 iargs [0] = sp [0];
10315 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10316 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10317 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10319 iargs [4] = sp [1];
10321 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10322 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10323 iargs, ip, cfg->real_offset, TRUE, &bblock);
10324 CHECK_CFG_EXCEPTION;
10325 g_assert (costs > 0);
10327 cfg->real_offset += 5;
10329 inline_costs += costs;
10331 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10338 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10340 if (mini_is_gsharedvt_klass (cfg, klass)) {
10341 MonoInst *offset_ins;
10343 context_used = mini_class_check_context_used (cfg, klass);
10345 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10346 dreg = alloc_ireg_mp (cfg);
10347 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10348 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10349 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10351 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10353 if (sp [0]->opcode != OP_LDADDR)
10354 store->flags |= MONO_INST_FAULT;
10356 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10357 /* insert call to write barrier */
10361 dreg = alloc_ireg_mp (cfg);
10362 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10363 emit_write_barrier (cfg, ptr, sp [1]);
10366 store->flags |= ins_flag;
10373 #ifndef DISABLE_REMOTING
10374 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10375 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10376 MonoInst *iargs [4];
10378 GSHAREDVT_FAILURE (op);
10380 iargs [0] = sp [0];
10381 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10382 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10383 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10384 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10385 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10386 iargs, ip, cfg->real_offset, TRUE, &bblock);
10387 CHECK_CFG_EXCEPTION;
10388 g_assert (costs > 0);
10390 cfg->real_offset += 5;
10394 inline_costs += costs;
10396 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10402 if (sp [0]->type == STACK_VTYPE) {
10405 /* Have to compute the address of the variable */
10407 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10409 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10411 g_assert (var->klass == klass);
10413 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10417 if (op == CEE_LDFLDA) {
10418 if (is_magic_tls_access (field)) {
10419 GSHAREDVT_FAILURE (*ip);
10421 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10423 if (sp [0]->type == STACK_OBJ) {
10424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10425 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10428 dreg = alloc_ireg_mp (cfg);
10430 if (mini_is_gsharedvt_klass (cfg, klass)) {
10431 MonoInst *offset_ins;
10433 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10434 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10436 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10438 ins->klass = mono_class_from_mono_type (field->type);
10439 ins->type = STACK_MP;
10445 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10447 if (mini_is_gsharedvt_klass (cfg, klass)) {
10448 MonoInst *offset_ins;
10450 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10451 dreg = alloc_ireg_mp (cfg);
10452 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10453 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10455 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10457 load->flags |= ins_flag;
10458 if (sp [0]->opcode != OP_LDADDR)
10459 load->flags |= MONO_INST_FAULT;
10473 * We can only support shared generic static
10474 * field access on architectures where the
10475 * trampoline code has been extended to handle
10476 * the generic class init.
10478 #ifndef MONO_ARCH_VTABLE_REG
10479 GENERIC_SHARING_FAILURE (op);
10482 context_used = mini_class_check_context_used (cfg, klass);
10484 ftype = mono_field_get_type (field);
10486 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10489 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10490 * to be called here.
10492 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10493 mono_class_vtable (cfg->domain, klass);
10494 CHECK_TYPELOAD (klass);
10496 mono_domain_lock (cfg->domain);
10497 if (cfg->domain->special_static_fields)
10498 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10499 mono_domain_unlock (cfg->domain);
10501 is_special_static = mono_class_field_is_special_static (field);
10503 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10504 thread_ins = mono_get_thread_intrinsic (cfg);
10508 /* Generate IR to compute the field address */
10509 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10511 * Fast access to TLS data
10512 * Inline version of get_thread_static_data () in
10516 int idx, static_data_reg, array_reg, dreg;
10518 GSHAREDVT_FAILURE (op);
10520 // offset &= 0x7fffffff;
10521 // idx = (offset >> 24) - 1;
10522 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10523 MONO_ADD_INS (cfg->cbb, thread_ins);
10524 static_data_reg = alloc_ireg (cfg);
10525 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10527 if (cfg->compile_aot) {
10528 int offset_reg, offset2_reg, idx_reg;
10530 /* For TLS variables, this will return the TLS offset */
10531 EMIT_NEW_SFLDACONST (cfg, ins, field);
10532 offset_reg = ins->dreg;
10533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10534 idx_reg = alloc_ireg (cfg);
10535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10538 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10539 array_reg = alloc_ireg (cfg);
10540 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10541 offset2_reg = alloc_ireg (cfg);
10542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10543 dreg = alloc_ireg (cfg);
10544 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10546 offset = (gsize)addr & 0x7fffffff;
10547 idx = (offset >> 24) - 1;
10549 array_reg = alloc_ireg (cfg);
10550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10551 dreg = alloc_ireg (cfg);
10552 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10554 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10555 (cfg->compile_aot && is_special_static) ||
10556 (context_used && is_special_static)) {
10557 MonoInst *iargs [2];
10559 g_assert (field->parent);
10560 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10561 if (context_used) {
10562 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10563 field, MONO_RGCTX_INFO_CLASS_FIELD);
10565 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10567 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10568 } else if (context_used) {
10569 MonoInst *static_data;
10572 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10573 method->klass->name_space, method->klass->name, method->name,
10574 depth, field->offset);
10577 if (mono_class_needs_cctor_run (klass, method))
10578 emit_generic_class_init (cfg, klass);
10581 * The pointer we're computing here is
10583 * super_info.static_data + field->offset
10585 static_data = emit_get_rgctx_klass (cfg, context_used,
10586 klass, MONO_RGCTX_INFO_STATIC_DATA);
10588 if (mini_is_gsharedvt_klass (cfg, klass)) {
10589 MonoInst *offset_ins;
10591 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10592 dreg = alloc_ireg_mp (cfg);
10593 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10594 } else if (field->offset == 0) {
10597 int addr_reg = mono_alloc_preg (cfg);
10598 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10600 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10601 MonoInst *iargs [2];
10603 g_assert (field->parent);
10604 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10605 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10606 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10608 MonoVTable *vtable = NULL;
10610 if (!cfg->compile_aot)
10611 vtable = mono_class_vtable (cfg->domain, klass);
10612 CHECK_TYPELOAD (klass);
10615 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10616 if (!(g_slist_find (class_inits, klass))) {
10617 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10618 if (cfg->verbose_level > 2)
10619 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10620 class_inits = g_slist_prepend (class_inits, klass);
10623 if (cfg->run_cctors) {
10625 /* This makes so that inline cannot trigger */
10626 /* .cctors: too many apps depend on them */
10627 /* running with a specific order... */
10629 if (! vtable->initialized)
10630 INLINE_FAILURE ("class init");
10631 ex = mono_runtime_class_init_full (vtable, FALSE);
10633 set_exception_object (cfg, ex);
10634 goto exception_exit;
10638 if (cfg->compile_aot)
10639 EMIT_NEW_SFLDACONST (cfg, ins, field);
10642 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10644 EMIT_NEW_PCONST (cfg, ins, addr);
10647 MonoInst *iargs [1];
10648 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10649 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10653 /* Generate IR to do the actual load/store operation */
10655 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10656 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10657 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10660 if (op == CEE_LDSFLDA) {
10661 ins->klass = mono_class_from_mono_type (ftype);
10662 ins->type = STACK_PTR;
10664 } else if (op == CEE_STSFLD) {
10667 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10668 store->flags |= ins_flag;
10670 gboolean is_const = FALSE;
10671 MonoVTable *vtable = NULL;
10672 gpointer addr = NULL;
10674 if (!context_used) {
10675 vtable = mono_class_vtable (cfg->domain, klass);
10676 CHECK_TYPELOAD (klass);
10678 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10679 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10680 int ro_type = ftype->type;
10682 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10683 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10684 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10687 GSHAREDVT_FAILURE (op);
10689 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10692 case MONO_TYPE_BOOLEAN:
10694 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10698 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10701 case MONO_TYPE_CHAR:
10703 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10707 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10712 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10716 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10721 case MONO_TYPE_PTR:
10722 case MONO_TYPE_FNPTR:
10723 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10724 type_to_eval_stack_type ((cfg), field->type, *sp);
10727 case MONO_TYPE_STRING:
10728 case MONO_TYPE_OBJECT:
10729 case MONO_TYPE_CLASS:
10730 case MONO_TYPE_SZARRAY:
10731 case MONO_TYPE_ARRAY:
10732 if (!mono_gc_is_moving ()) {
10733 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10734 type_to_eval_stack_type ((cfg), field->type, *sp);
10742 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10747 case MONO_TYPE_VALUETYPE:
10757 CHECK_STACK_OVF (1);
10759 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10760 load->flags |= ins_flag;
10766 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10767 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10768 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10779 token = read32 (ip + 1);
10780 klass = mini_get_class (method, token, generic_context);
10781 CHECK_TYPELOAD (klass);
10782 if (ins_flag & MONO_INST_VOLATILE) {
10783 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10784 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
10786 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10787 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10788 ins->flags |= ins_flag;
10789 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10790 generic_class_is_reference_type (cfg, klass)) {
10791 /* insert call to write barrier */
10792 emit_write_barrier (cfg, sp [0], sp [1]);
10804 const char *data_ptr;
10806 guint32 field_token;
10812 token = read32 (ip + 1);
10814 klass = mini_get_class (method, token, generic_context);
10815 CHECK_TYPELOAD (klass);
10817 context_used = mini_class_check_context_used (cfg, klass);
10819 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10820 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10821 ins->sreg1 = sp [0]->dreg;
10822 ins->type = STACK_I4;
10823 ins->dreg = alloc_ireg (cfg);
10824 MONO_ADD_INS (cfg->cbb, ins);
10825 *sp = mono_decompose_opcode (cfg, ins);
10828 if (context_used) {
10829 MonoInst *args [3];
10830 MonoClass *array_class = mono_array_class_get (klass, 1);
10831 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10833 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10836 args [0] = emit_get_rgctx_klass (cfg, context_used,
10837 array_class, MONO_RGCTX_INFO_VTABLE);
10842 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10844 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10846 if (cfg->opt & MONO_OPT_SHARED) {
10847 /* Decompose now to avoid problems with references to the domainvar */
10848 MonoInst *iargs [3];
10850 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10851 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10852 iargs [2] = sp [0];
10854 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10856 /* Decompose later since it is needed by abcrem */
10857 MonoClass *array_type = mono_array_class_get (klass, 1);
10858 mono_class_vtable (cfg->domain, array_type);
10859 CHECK_TYPELOAD (array_type);
10861 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10862 ins->dreg = alloc_ireg_ref (cfg);
10863 ins->sreg1 = sp [0]->dreg;
10864 ins->inst_newa_class = klass;
10865 ins->type = STACK_OBJ;
10866 ins->klass = array_type;
10867 MONO_ADD_INS (cfg->cbb, ins);
10868 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10869 cfg->cbb->has_array_access = TRUE;
10871 /* Needed so mono_emit_load_get_addr () gets called */
10872 mono_get_got_var (cfg);
10882 * we inline/optimize the initialization sequence if possible.
10883 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10884 * for small sizes open code the memcpy
10885 * ensure the rva field is big enough
10887 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10888 MonoMethod *memcpy_method = get_memcpy_method ();
10889 MonoInst *iargs [3];
10890 int add_reg = alloc_ireg_mp (cfg);
10892 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10893 if (cfg->compile_aot) {
10894 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10896 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10898 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10899 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10908 if (sp [0]->type != STACK_OBJ)
10911 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10912 ins->dreg = alloc_preg (cfg);
10913 ins->sreg1 = sp [0]->dreg;
10914 ins->type = STACK_I4;
10915 /* This flag will be inherited by the decomposition */
10916 ins->flags |= MONO_INST_FAULT;
10917 MONO_ADD_INS (cfg->cbb, ins);
10918 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10919 cfg->cbb->has_array_access = TRUE;
10927 if (sp [0]->type != STACK_OBJ)
10930 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10932 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10933 CHECK_TYPELOAD (klass);
10934 /* we need to make sure that this array is exactly the type it needs
10935 * to be for correctness. the wrappers are lax with their usage
10936 * so we need to ignore them here
10938 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10939 MonoClass *array_class = mono_array_class_get (klass, 1);
10940 mini_emit_check_array_type (cfg, sp [0], array_class);
10941 CHECK_TYPELOAD (array_class);
10945 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10950 case CEE_LDELEM_I1:
10951 case CEE_LDELEM_U1:
10952 case CEE_LDELEM_I2:
10953 case CEE_LDELEM_U2:
10954 case CEE_LDELEM_I4:
10955 case CEE_LDELEM_U4:
10956 case CEE_LDELEM_I8:
10958 case CEE_LDELEM_R4:
10959 case CEE_LDELEM_R8:
10960 case CEE_LDELEM_REF: {
10966 if (*ip == CEE_LDELEM) {
10968 token = read32 (ip + 1);
10969 klass = mini_get_class (method, token, generic_context);
10970 CHECK_TYPELOAD (klass);
10971 mono_class_init (klass);
10974 klass = array_access_to_klass (*ip);
10976 if (sp [0]->type != STACK_OBJ)
10979 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10981 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10982 // FIXME-VT: OP_ICONST optimization
10983 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10984 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10985 ins->opcode = OP_LOADV_MEMBASE;
10986 } else if (sp [1]->opcode == OP_ICONST) {
10987 int array_reg = sp [0]->dreg;
10988 int index_reg = sp [1]->dreg;
10989 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
10991 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10992 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10994 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10995 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10998 if (*ip == CEE_LDELEM)
11005 case CEE_STELEM_I1:
11006 case CEE_STELEM_I2:
11007 case CEE_STELEM_I4:
11008 case CEE_STELEM_I8:
11009 case CEE_STELEM_R4:
11010 case CEE_STELEM_R8:
11011 case CEE_STELEM_REF:
11016 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11018 if (*ip == CEE_STELEM) {
11020 token = read32 (ip + 1);
11021 klass = mini_get_class (method, token, generic_context);
11022 CHECK_TYPELOAD (klass);
11023 mono_class_init (klass);
11026 klass = array_access_to_klass (*ip);
11028 if (sp [0]->type != STACK_OBJ)
11031 emit_array_store (cfg, klass, sp, TRUE);
11033 if (*ip == CEE_STELEM)
11040 case CEE_CKFINITE: {
11044 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11045 ins->sreg1 = sp [0]->dreg;
11046 ins->dreg = alloc_freg (cfg);
11047 ins->type = STACK_R8;
11048 MONO_ADD_INS (bblock, ins);
11050 *sp++ = mono_decompose_opcode (cfg, ins);
11055 case CEE_REFANYVAL: {
11056 MonoInst *src_var, *src;
11058 int klass_reg = alloc_preg (cfg);
11059 int dreg = alloc_preg (cfg);
11061 GSHAREDVT_FAILURE (*ip);
11064 MONO_INST_NEW (cfg, ins, *ip);
11067 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11068 CHECK_TYPELOAD (klass);
11070 context_used = mini_class_check_context_used (cfg, klass);
11073 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11075 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11076 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11079 if (context_used) {
11080 MonoInst *klass_ins;
11082 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11083 klass, MONO_RGCTX_INFO_KLASS);
11086 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11087 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11089 mini_emit_class_check (cfg, klass_reg, klass);
11091 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11092 ins->type = STACK_MP;
11097 case CEE_MKREFANY: {
11098 MonoInst *loc, *addr;
11100 GSHAREDVT_FAILURE (*ip);
11103 MONO_INST_NEW (cfg, ins, *ip);
11106 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11107 CHECK_TYPELOAD (klass);
11109 context_used = mini_class_check_context_used (cfg, klass);
11111 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11112 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11114 if (context_used) {
11115 MonoInst *const_ins;
11116 int type_reg = alloc_preg (cfg);
11118 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11119 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11120 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11121 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11122 } else if (cfg->compile_aot) {
11123 int const_reg = alloc_preg (cfg);
11124 int type_reg = alloc_preg (cfg);
11126 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11127 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11128 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11129 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11131 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11132 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11134 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11136 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11137 ins->type = STACK_VTYPE;
11138 ins->klass = mono_defaults.typed_reference_class;
11143 case CEE_LDTOKEN: {
11145 MonoClass *handle_class;
11147 CHECK_STACK_OVF (1);
11150 n = read32 (ip + 1);
11152 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11153 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11154 handle = mono_method_get_wrapper_data (method, n);
11155 handle_class = mono_method_get_wrapper_data (method, n + 1);
11156 if (handle_class == mono_defaults.typehandle_class)
11157 handle = &((MonoClass*)handle)->byval_arg;
11160 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11165 mono_class_init (handle_class);
11166 if (cfg->generic_sharing_context) {
11167 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11168 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11169 /* This case handles ldtoken
11170 of an open type, like for
11173 } else if (handle_class == mono_defaults.typehandle_class) {
11174 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11175 } else if (handle_class == mono_defaults.fieldhandle_class)
11176 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11177 else if (handle_class == mono_defaults.methodhandle_class)
11178 context_used = mini_method_check_context_used (cfg, handle);
11180 g_assert_not_reached ();
11183 if ((cfg->opt & MONO_OPT_SHARED) &&
11184 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11185 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11186 MonoInst *addr, *vtvar, *iargs [3];
11187 int method_context_used;
11189 method_context_used = mini_method_check_context_used (cfg, method);
11191 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11193 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11194 EMIT_NEW_ICONST (cfg, iargs [1], n);
11195 if (method_context_used) {
11196 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11197 method, MONO_RGCTX_INFO_METHOD);
11198 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11200 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11201 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11203 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11205 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11207 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11209 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11210 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11211 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11212 (cmethod->klass == mono_defaults.systemtype_class) &&
11213 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11214 MonoClass *tclass = mono_class_from_mono_type (handle);
11216 mono_class_init (tclass);
11217 if (context_used) {
11218 ins = emit_get_rgctx_klass (cfg, context_used,
11219 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11220 } else if (cfg->compile_aot) {
11221 if (method->wrapper_type) {
11222 mono_error_init (&error); //got to do it since there are multiple conditionals below
11223 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11224 /* Special case for static synchronized wrappers */
11225 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11227 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11228 /* FIXME: n is not a normal token */
11230 EMIT_NEW_PCONST (cfg, ins, NULL);
11233 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11236 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11238 ins->type = STACK_OBJ;
11239 ins->klass = cmethod->klass;
11242 MonoInst *addr, *vtvar;
11244 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11246 if (context_used) {
11247 if (handle_class == mono_defaults.typehandle_class) {
11248 ins = emit_get_rgctx_klass (cfg, context_used,
11249 mono_class_from_mono_type (handle),
11250 MONO_RGCTX_INFO_TYPE);
11251 } else if (handle_class == mono_defaults.methodhandle_class) {
11252 ins = emit_get_rgctx_method (cfg, context_used,
11253 handle, MONO_RGCTX_INFO_METHOD);
11254 } else if (handle_class == mono_defaults.fieldhandle_class) {
11255 ins = emit_get_rgctx_field (cfg, context_used,
11256 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11258 g_assert_not_reached ();
11260 } else if (cfg->compile_aot) {
11261 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11263 EMIT_NEW_PCONST (cfg, ins, handle);
11265 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11266 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11267 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11277 MONO_INST_NEW (cfg, ins, OP_THROW);
11279 ins->sreg1 = sp [0]->dreg;
11281 bblock->out_of_line = TRUE;
11282 MONO_ADD_INS (bblock, ins);
11283 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11284 MONO_ADD_INS (bblock, ins);
11287 link_bblock (cfg, bblock, end_bblock);
11288 start_new_bblock = 1;
11290 case CEE_ENDFINALLY:
11291 /* mono_save_seq_point_info () depends on this */
11292 if (sp != stack_start)
11293 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11294 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11295 MONO_ADD_INS (bblock, ins);
11297 start_new_bblock = 1;
11300 * Control will leave the method so empty the stack, otherwise
11301 * the next basic block will start with a nonempty stack.
11303 while (sp != stack_start) {
11308 case CEE_LEAVE_S: {
11311 if (*ip == CEE_LEAVE) {
11313 target = ip + 5 + (gint32)read32(ip + 1);
11316 target = ip + 2 + (signed char)(ip [1]);
11319 /* empty the stack */
11320 while (sp != stack_start) {
11325 * If this leave statement is in a catch block, check for a
11326 * pending exception, and rethrow it if necessary.
11327 * We avoid doing this in runtime invoke wrappers, since those are called
11328 * by native code which excepts the wrapper to catch all exceptions.
11330 for (i = 0; i < header->num_clauses; ++i) {
11331 MonoExceptionClause *clause = &header->clauses [i];
11334 * Use <= in the final comparison to handle clauses with multiple
11335 * leave statements, like in bug #78024.
11336 * The ordering of the exception clauses guarantees that we find the
11337 * innermost clause.
11339 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11341 MonoBasicBlock *dont_throw;
11346 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11349 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11351 NEW_BBLOCK (cfg, dont_throw);
11354 * Currently, we always rethrow the abort exception, despite the
11355 * fact that this is not correct. See thread6.cs for an example.
11356 * But propagating the abort exception is more important than
11357 * getting the sematics right.
11359 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11360 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11361 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11363 MONO_START_BB (cfg, dont_throw);
11368 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11370 MonoExceptionClause *clause;
11372 for (tmp = handlers; tmp; tmp = tmp->next) {
11373 clause = tmp->data;
11374 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11376 link_bblock (cfg, bblock, tblock);
11377 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11378 ins->inst_target_bb = tblock;
11379 ins->inst_eh_block = clause;
11380 MONO_ADD_INS (bblock, ins);
11381 bblock->has_call_handler = 1;
11382 if (COMPILE_LLVM (cfg)) {
11383 MonoBasicBlock *target_bb;
11386 * Link the finally bblock with the target, since it will
11387 * conceptually branch there.
11388 * FIXME: Have to link the bblock containing the endfinally.
11390 GET_BBLOCK (cfg, target_bb, target);
11391 link_bblock (cfg, tblock, target_bb);
11394 g_list_free (handlers);
11397 MONO_INST_NEW (cfg, ins, OP_BR);
11398 MONO_ADD_INS (bblock, ins);
11399 GET_BBLOCK (cfg, tblock, target);
11400 link_bblock (cfg, bblock, tblock);
11401 ins->inst_target_bb = tblock;
11402 start_new_bblock = 1;
11404 if (*ip == CEE_LEAVE)
11413 * Mono specific opcodes
11415 case MONO_CUSTOM_PREFIX: {
11417 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11421 case CEE_MONO_ICALL: {
11423 MonoJitICallInfo *info;
11425 token = read32 (ip + 2);
11426 func = mono_method_get_wrapper_data (method, token);
11427 info = mono_find_jit_icall_by_addr (func);
11429 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11432 CHECK_STACK (info->sig->param_count);
11433 sp -= info->sig->param_count;
11435 ins = mono_emit_jit_icall (cfg, info->func, sp);
11436 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11440 inline_costs += 10 * num_calls++;
11444 case CEE_MONO_LDPTR: {
11447 CHECK_STACK_OVF (1);
11449 token = read32 (ip + 2);
11451 ptr = mono_method_get_wrapper_data (method, token);
11452 /* FIXME: Generalize this */
11453 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11454 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11459 EMIT_NEW_PCONST (cfg, ins, ptr);
11462 inline_costs += 10 * num_calls++;
11463 /* Can't embed random pointers into AOT code */
11467 case CEE_MONO_JIT_ICALL_ADDR: {
11468 MonoJitICallInfo *callinfo;
11471 CHECK_STACK_OVF (1);
11473 token = read32 (ip + 2);
11475 ptr = mono_method_get_wrapper_data (method, token);
11476 callinfo = mono_find_jit_icall_by_addr (ptr);
11477 g_assert (callinfo);
11478 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11481 inline_costs += 10 * num_calls++;
11484 case CEE_MONO_ICALL_ADDR: {
11485 MonoMethod *cmethod;
11488 CHECK_STACK_OVF (1);
11490 token = read32 (ip + 2);
11492 cmethod = mono_method_get_wrapper_data (method, token);
11494 if (cfg->compile_aot) {
11495 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11497 ptr = mono_lookup_internal_call (cmethod);
11499 EMIT_NEW_PCONST (cfg, ins, ptr);
11505 case CEE_MONO_VTADDR: {
11506 MonoInst *src_var, *src;
11512 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11513 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11518 case CEE_MONO_NEWOBJ: {
11519 MonoInst *iargs [2];
11521 CHECK_STACK_OVF (1);
11523 token = read32 (ip + 2);
11524 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11525 mono_class_init (klass);
11526 NEW_DOMAINCONST (cfg, iargs [0]);
11527 MONO_ADD_INS (cfg->cbb, iargs [0]);
11528 NEW_CLASSCONST (cfg, iargs [1], klass);
11529 MONO_ADD_INS (cfg->cbb, iargs [1]);
11530 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11532 inline_costs += 10 * num_calls++;
11535 case CEE_MONO_OBJADDR:
11538 MONO_INST_NEW (cfg, ins, OP_MOVE);
11539 ins->dreg = alloc_ireg_mp (cfg);
11540 ins->sreg1 = sp [0]->dreg;
11541 ins->type = STACK_MP;
11542 MONO_ADD_INS (cfg->cbb, ins);
11546 case CEE_MONO_LDNATIVEOBJ:
11548 * Similar to LDOBJ, but instead load the unmanaged
11549 * representation of the vtype to the stack.
11554 token = read32 (ip + 2);
11555 klass = mono_method_get_wrapper_data (method, token);
11556 g_assert (klass->valuetype);
11557 mono_class_init (klass);
11560 MonoInst *src, *dest, *temp;
11563 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11564 temp->backend.is_pinvoke = 1;
11565 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11566 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11568 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11569 dest->type = STACK_VTYPE;
11570 dest->klass = klass;
11576 case CEE_MONO_RETOBJ: {
11578 * Same as RET, but return the native representation of a vtype
11581 g_assert (cfg->ret);
11582 g_assert (mono_method_signature (method)->pinvoke);
11587 token = read32 (ip + 2);
11588 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11590 if (!cfg->vret_addr) {
11591 g_assert (cfg->ret_var_is_local);
11593 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11595 EMIT_NEW_RETLOADA (cfg, ins);
11597 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11599 if (sp != stack_start)
11602 MONO_INST_NEW (cfg, ins, OP_BR);
11603 ins->inst_target_bb = end_bblock;
11604 MONO_ADD_INS (bblock, ins);
11605 link_bblock (cfg, bblock, end_bblock);
11606 start_new_bblock = 1;
11610 case CEE_MONO_CISINST:
11611 case CEE_MONO_CCASTCLASS: {
11616 token = read32 (ip + 2);
11617 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11618 if (ip [1] == CEE_MONO_CISINST)
11619 ins = handle_cisinst (cfg, klass, sp [0]);
11621 ins = handle_ccastclass (cfg, klass, sp [0]);
11627 case CEE_MONO_SAVE_LMF:
11628 case CEE_MONO_RESTORE_LMF:
11629 #ifdef MONO_ARCH_HAVE_LMF_OPS
11630 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11631 MONO_ADD_INS (bblock, ins);
11632 cfg->need_lmf_area = TRUE;
11636 case CEE_MONO_CLASSCONST:
11637 CHECK_STACK_OVF (1);
11639 token = read32 (ip + 2);
11640 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11643 inline_costs += 10 * num_calls++;
11645 case CEE_MONO_NOT_TAKEN:
11646 bblock->out_of_line = TRUE;
11649 case CEE_MONO_TLS: {
11652 CHECK_STACK_OVF (1);
11654 key = (gint32)read32 (ip + 2);
11655 g_assert (key < TLS_KEY_NUM);
11657 ins = mono_create_tls_get (cfg, key);
11659 if (cfg->compile_aot) {
11661 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11662 ins->dreg = alloc_preg (cfg);
11663 ins->type = STACK_PTR;
11665 g_assert_not_reached ();
11668 ins->type = STACK_PTR;
11669 MONO_ADD_INS (bblock, ins);
11674 case CEE_MONO_DYN_CALL: {
11675 MonoCallInst *call;
11677 /* It would be easier to call a trampoline, but that would put an
11678 * extra frame on the stack, confusing exception handling. So
11679 * implement it inline using an opcode for now.
11682 if (!cfg->dyn_call_var) {
11683 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11684 /* prevent it from being register allocated */
11685 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11688 /* Has to use a call inst since it local regalloc expects it */
11689 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11690 ins = (MonoInst*)call;
11692 ins->sreg1 = sp [0]->dreg;
11693 ins->sreg2 = sp [1]->dreg;
11694 MONO_ADD_INS (bblock, ins);
11696 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11699 inline_costs += 10 * num_calls++;
11703 case CEE_MONO_MEMORY_BARRIER: {
11705 emit_memory_barrier (cfg, (int)read32 (ip + 2));
11709 case CEE_MONO_JIT_ATTACH: {
11710 MonoInst *args [16], *domain_ins;
11711 MonoInst *ad_ins, *jit_tls_ins;
11712 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11714 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11716 EMIT_NEW_PCONST (cfg, ins, NULL);
11717 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11719 ad_ins = mono_get_domain_intrinsic (cfg);
11720 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
11722 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
11723 NEW_BBLOCK (cfg, next_bb);
11724 NEW_BBLOCK (cfg, call_bb);
11726 if (cfg->compile_aot) {
11727 /* AOT code is only used in the root domain */
11728 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11730 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11732 MONO_ADD_INS (cfg->cbb, ad_ins);
11733 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11734 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11736 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
11737 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11738 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11740 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11741 MONO_START_BB (cfg, call_bb);
11744 if (cfg->compile_aot) {
11745 /* AOT code is only used in the root domain */
11746 EMIT_NEW_PCONST (cfg, args [0], NULL);
11748 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11750 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11751 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11754 MONO_START_BB (cfg, next_bb);
11760 case CEE_MONO_JIT_DETACH: {
11761 MonoInst *args [16];
11763 /* Restore the original domain */
11764 dreg = alloc_ireg (cfg);
11765 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11766 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11771 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11777 case CEE_PREFIX1: {
11780 case CEE_ARGLIST: {
11781 /* somewhat similar to LDTOKEN */
11782 MonoInst *addr, *vtvar;
11783 CHECK_STACK_OVF (1);
11784 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11786 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11787 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11789 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11790 ins->type = STACK_VTYPE;
11791 ins->klass = mono_defaults.argumenthandle_class;
11804 * The following transforms:
11805 * CEE_CEQ into OP_CEQ
11806 * CEE_CGT into OP_CGT
11807 * CEE_CGT_UN into OP_CGT_UN
11808 * CEE_CLT into OP_CLT
11809 * CEE_CLT_UN into OP_CLT_UN
11811 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11813 MONO_INST_NEW (cfg, ins, cmp->opcode);
11815 cmp->sreg1 = sp [0]->dreg;
11816 cmp->sreg2 = sp [1]->dreg;
11817 type_from_op (cmp, sp [0], sp [1]);
11819 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11820 cmp->opcode = OP_LCOMPARE;
11821 else if (sp [0]->type == STACK_R8)
11822 cmp->opcode = OP_FCOMPARE;
11824 cmp->opcode = OP_ICOMPARE;
11825 MONO_ADD_INS (bblock, cmp);
11826 ins->type = STACK_I4;
11827 ins->dreg = alloc_dreg (cfg, ins->type);
11828 type_from_op (ins, sp [0], sp [1]);
11830 if (cmp->opcode == OP_FCOMPARE) {
11832 * The backends expect the fceq opcodes to do the
11835 ins->sreg1 = cmp->sreg1;
11836 ins->sreg2 = cmp->sreg2;
11839 MONO_ADD_INS (bblock, ins);
11845 MonoInst *argconst;
11846 MonoMethod *cil_method;
11848 CHECK_STACK_OVF (1);
11850 n = read32 (ip + 2);
11851 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11852 if (!cmethod || mono_loader_get_last_error ())
11854 mono_class_init (cmethod->klass);
11856 mono_save_token_info (cfg, image, n, cmethod);
11858 context_used = mini_method_check_context_used (cfg, cmethod);
11860 cil_method = cmethod;
11861 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11862 METHOD_ACCESS_FAILURE (method, cil_method);
11864 if (mono_security_cas_enabled ()) {
11865 if (check_linkdemand (cfg, method, cmethod))
11866 INLINE_FAILURE ("linkdemand");
11867 CHECK_CFG_EXCEPTION;
11868 } else if (mono_security_core_clr_enabled ()) {
11869 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11873 * Optimize the common case of ldftn+delegate creation
11875 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11876 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11877 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11878 MonoInst *target_ins, *handle_ins;
11879 MonoMethod *invoke;
11880 int invoke_context_used;
11882 invoke = mono_get_delegate_invoke (ctor_method->klass);
11883 if (!invoke || !mono_method_signature (invoke))
11886 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11888 target_ins = sp [-1];
11890 if (mono_security_core_clr_enabled ())
11891 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11893 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11894 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11895 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11896 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11897 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11901 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11902 /* FIXME: SGEN support */
11903 if (invoke_context_used == 0) {
11905 if (cfg->verbose_level > 3)
11906 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11907 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
11910 CHECK_CFG_EXCEPTION;
11921 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11922 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11926 inline_costs += 10 * num_calls++;
11929 case CEE_LDVIRTFTN: {
11930 MonoInst *args [2];
11934 n = read32 (ip + 2);
11935 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11936 if (!cmethod || mono_loader_get_last_error ())
11938 mono_class_init (cmethod->klass);
11940 context_used = mini_method_check_context_used (cfg, cmethod);
11942 if (mono_security_cas_enabled ()) {
11943 if (check_linkdemand (cfg, method, cmethod))
11944 INLINE_FAILURE ("linkdemand");
11945 CHECK_CFG_EXCEPTION;
11946 } else if (mono_security_core_clr_enabled ()) {
11947 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11951 * Optimize the common case of ldvirtftn+delegate creation
11953 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
11954 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11955 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11956 MonoInst *target_ins, *handle_ins;
11957 MonoMethod *invoke;
11958 int invoke_context_used;
11960 invoke = mono_get_delegate_invoke (ctor_method->klass);
11961 if (!invoke || !mono_method_signature (invoke))
11964 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11966 target_ins = sp [-1];
11968 if (mono_security_core_clr_enabled ())
11969 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11971 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11972 /* FIXME: SGEN support */
11973 if (invoke_context_used == 0) {
11975 if (cfg->verbose_level > 3)
11976 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11977 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
11980 CHECK_CFG_EXCEPTION;
11994 args [1] = emit_get_rgctx_method (cfg, context_used,
11995 cmethod, MONO_RGCTX_INFO_METHOD);
11998 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12000 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12003 inline_costs += 10 * num_calls++;
12007 CHECK_STACK_OVF (1);
12009 n = read16 (ip + 2);
12011 EMIT_NEW_ARGLOAD (cfg, ins, n);
12016 CHECK_STACK_OVF (1);
12018 n = read16 (ip + 2);
12020 NEW_ARGLOADA (cfg, ins, n);
12021 MONO_ADD_INS (cfg->cbb, ins);
12029 n = read16 (ip + 2);
12031 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12033 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12037 CHECK_STACK_OVF (1);
12039 n = read16 (ip + 2);
12041 EMIT_NEW_LOCLOAD (cfg, ins, n);
12046 unsigned char *tmp_ip;
12047 CHECK_STACK_OVF (1);
12049 n = read16 (ip + 2);
12052 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12058 EMIT_NEW_LOCLOADA (cfg, ins, n);
12067 n = read16 (ip + 2);
12069 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12071 emit_stloc_ir (cfg, sp, header, n);
12078 if (sp != stack_start)
12080 if (cfg->method != method)
12082 * Inlining this into a loop in a parent could lead to
12083 * stack overflows which is different behavior than the
12084 * non-inlined case, thus disable inlining in this case.
12086 INLINE_FAILURE("localloc");
12088 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12089 ins->dreg = alloc_preg (cfg);
12090 ins->sreg1 = sp [0]->dreg;
12091 ins->type = STACK_PTR;
12092 MONO_ADD_INS (cfg->cbb, ins);
12094 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12096 ins->flags |= MONO_INST_INIT;
12101 case CEE_ENDFILTER: {
12102 MonoExceptionClause *clause, *nearest;
12103 int cc, nearest_num;
12107 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12109 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12110 ins->sreg1 = (*sp)->dreg;
12111 MONO_ADD_INS (bblock, ins);
12112 start_new_bblock = 1;
12117 for (cc = 0; cc < header->num_clauses; ++cc) {
12118 clause = &header->clauses [cc];
12119 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12120 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12121 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12126 g_assert (nearest);
12127 if ((ip - header->code) != nearest->handler_offset)
12132 case CEE_UNALIGNED_:
12133 ins_flag |= MONO_INST_UNALIGNED;
12134 /* FIXME: record alignment? we can assume 1 for now */
12138 case CEE_VOLATILE_:
12139 ins_flag |= MONO_INST_VOLATILE;
12143 ins_flag |= MONO_INST_TAILCALL;
12144 cfg->flags |= MONO_CFG_HAS_TAIL;
12145 /* Can't inline tail calls at this time */
12146 inline_costs += 100000;
12153 token = read32 (ip + 2);
12154 klass = mini_get_class (method, token, generic_context);
12155 CHECK_TYPELOAD (klass);
12156 if (generic_class_is_reference_type (cfg, klass))
12157 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12159 mini_emit_initobj (cfg, *sp, NULL, klass);
12163 case CEE_CONSTRAINED_:
12165 token = read32 (ip + 2);
12166 constrained_call = mini_get_class (method, token, generic_context);
12167 CHECK_TYPELOAD (constrained_call);
12171 case CEE_INITBLK: {
12172 MonoInst *iargs [3];
12176 /* Skip optimized paths for volatile operations. */
12177 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12178 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12179 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12180 /* emit_memset only works when val == 0 */
12181 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12184 iargs [0] = sp [0];
12185 iargs [1] = sp [1];
12186 iargs [2] = sp [2];
12187 if (ip [1] == CEE_CPBLK) {
12189 * FIXME: It's unclear whether we should be emitting both the acquire
12190 * and release barriers for cpblk. It is technically both a load and
12191 * store operation, so it seems like that's the sensible thing to do.
12193 * FIXME: We emit full barriers on both sides of the operation for
12194 * simplicity. We should have a separate atomic memcpy method instead.
12196 MonoMethod *memcpy_method = get_memcpy_method ();
12198 if (ins_flag & MONO_INST_VOLATILE)
12199 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12201 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12202 call->flags |= ins_flag;
12204 if (ins_flag & MONO_INST_VOLATILE)
12205 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12207 MonoMethod *memset_method = get_memset_method ();
12208 if (ins_flag & MONO_INST_VOLATILE) {
12209 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12210 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12212 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12213 call->flags |= ins_flag;
12224 ins_flag |= MONO_INST_NOTYPECHECK;
12226 ins_flag |= MONO_INST_NORANGECHECK;
12227 /* we ignore the no-nullcheck for now since we
12228 * really do it explicitly only when doing callvirt->call
12232 case CEE_RETHROW: {
12234 int handler_offset = -1;
12236 for (i = 0; i < header->num_clauses; ++i) {
12237 MonoExceptionClause *clause = &header->clauses [i];
12238 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12239 handler_offset = clause->handler_offset;
12244 bblock->flags |= BB_EXCEPTION_UNSAFE;
12246 if (handler_offset == -1)
12249 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12250 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12251 ins->sreg1 = load->dreg;
12252 MONO_ADD_INS (bblock, ins);
12254 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12255 MONO_ADD_INS (bblock, ins);
12258 link_bblock (cfg, bblock, end_bblock);
12259 start_new_bblock = 1;
12267 CHECK_STACK_OVF (1);
12269 token = read32 (ip + 2);
12270 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12271 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12274 val = mono_type_size (type, &ialign);
12276 MonoClass *klass = mini_get_class (method, token, generic_context);
12277 CHECK_TYPELOAD (klass);
12279 val = mono_type_size (&klass->byval_arg, &ialign);
12281 if (mini_is_gsharedvt_klass (cfg, klass))
12282 GSHAREDVT_FAILURE (*ip);
12284 EMIT_NEW_ICONST (cfg, ins, val);
12289 case CEE_REFANYTYPE: {
12290 MonoInst *src_var, *src;
12292 GSHAREDVT_FAILURE (*ip);
12298 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12300 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12301 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12302 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12307 case CEE_READONLY_:
12320 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12330 g_warning ("opcode 0x%02x not handled", *ip);
12334 if (start_new_bblock != 1)
12337 bblock->cil_length = ip - bblock->cil_code;
12338 if (bblock->next_bb) {
12339 /* This could already be set because of inlining, #693905 */
12340 MonoBasicBlock *bb = bblock;
12342 while (bb->next_bb)
12344 bb->next_bb = end_bblock;
12346 bblock->next_bb = end_bblock;
12349 if (cfg->method == method && cfg->domainvar) {
12351 MonoInst *get_domain;
12353 cfg->cbb = init_localsbb;
12355 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12356 MONO_ADD_INS (cfg->cbb, get_domain);
12358 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12360 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12361 MONO_ADD_INS (cfg->cbb, store);
12364 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12365 if (cfg->compile_aot)
12366 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12367 mono_get_got_var (cfg);
12370 if (cfg->method == method && cfg->got_var)
12371 mono_emit_load_got_addr (cfg);
12373 if (init_localsbb) {
12374 cfg->cbb = init_localsbb;
12376 for (i = 0; i < header->num_locals; ++i) {
12377 emit_init_local (cfg, i, header->locals [i], init_locals);
12381 if (cfg->init_ref_vars && cfg->method == method) {
12382 /* Emit initialization for ref vars */
12383 // FIXME: Avoid duplication initialization for IL locals.
12384 for (i = 0; i < cfg->num_varinfo; ++i) {
12385 MonoInst *ins = cfg->varinfo [i];
12387 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12388 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12392 if (cfg->lmf_var && cfg->method == method) {
12393 cfg->cbb = init_localsbb;
12394 emit_push_lmf (cfg);
12397 cfg->cbb = init_localsbb;
12398 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12401 MonoBasicBlock *bb;
12404 * Make seq points at backward branch targets interruptable.
12406 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12407 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12408 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12411 /* Add a sequence point for method entry/exit events */
12412 if (cfg->gen_seq_points_debug_data) {
12413 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12414 MONO_ADD_INS (init_localsbb, ins);
12415 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12416 MONO_ADD_INS (cfg->bb_exit, ins);
12420 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12421 * the code they refer to was dead (#11880).
12423 if (sym_seq_points) {
12424 for (i = 0; i < header->code_size; ++i) {
12425 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12428 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12429 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12436 if (cfg->method == method) {
12437 MonoBasicBlock *bb;
12438 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12439 bb->region = mono_find_block_region (cfg, bb->real_offset);
12441 mono_create_spvar_for_region (cfg, bb->region);
12442 if (cfg->verbose_level > 2)
12443 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12447 if (inline_costs < 0) {
12450 /* Method is too large */
12451 mname = mono_method_full_name (method, TRUE);
12452 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12453 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12457 if ((cfg->verbose_level > 2) && (cfg->method == method))
12458 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12463 g_assert (!mono_error_ok (&cfg->error));
12467 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12471 set_exception_type_from_invalid_il (cfg, method, ip);
12475 g_slist_free (class_inits);
12476 mono_basic_block_free (original_bb);
12477 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12478 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12479 if (cfg->exception_type)
12482 return inline_costs;
12486 store_membase_reg_to_store_membase_imm (int opcode)
12489 case OP_STORE_MEMBASE_REG:
12490 return OP_STORE_MEMBASE_IMM;
12491 case OP_STOREI1_MEMBASE_REG:
12492 return OP_STOREI1_MEMBASE_IMM;
12493 case OP_STOREI2_MEMBASE_REG:
12494 return OP_STOREI2_MEMBASE_IMM;
12495 case OP_STOREI4_MEMBASE_REG:
12496 return OP_STOREI4_MEMBASE_IMM;
12497 case OP_STOREI8_MEMBASE_REG:
12498 return OP_STOREI8_MEMBASE_IMM;
12500 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an ALU/compare/store/call opcode taking a register operand to the
 * variant taking an immediate operand, used when one operand is a constant.
 * Covers 32 bit and 64 bit arithmetic, shifts, compares, register stores
 * and some x86/amd64-specific opcodes.
 */
12507 mono_op_to_op_imm (int opcode)
12511 return OP_IADD_IMM;
12513 return OP_ISUB_IMM;
12515 return OP_IDIV_IMM;
12517 return OP_IDIV_UN_IMM;
12519 return OP_IREM_IMM;
12521 return OP_IREM_UN_IMM;
12523 return OP_IMUL_IMM;
12525 return OP_IAND_IMM;
12529 return OP_IXOR_IMM;
12531 return OP_ISHL_IMM;
12533 return OP_ISHR_IMM;
12535 return OP_ISHR_UN_IMM;
12538 return OP_LADD_IMM;
12540 return OP_LSUB_IMM;
12542 return OP_LAND_IMM;
12546 return OP_LXOR_IMM;
12548 return OP_LSHL_IMM;
12550 return OP_LSHR_IMM;
12552 return OP_LSHR_UN_IMM;
/* 64 bit remainder-by-immediate is only available on 64 bit registers. */
12553 #if SIZEOF_REGISTER == 8
12555 return OP_LREM_IMM;
12559 return OP_COMPARE_IMM;
12561 return OP_ICOMPARE_IMM;
12563 return OP_LCOMPARE_IMM;
12565 case OP_STORE_MEMBASE_REG:
12566 return OP_STORE_MEMBASE_IMM;
12567 case OP_STOREI1_MEMBASE_REG:
12568 return OP_STOREI1_MEMBASE_IMM;
12569 case OP_STOREI2_MEMBASE_REG:
12570 return OP_STOREI2_MEMBASE_IMM;
12571 case OP_STOREI4_MEMBASE_REG:
12572 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 can push and compare-membase with an immediate directly. */
12574 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12576 return OP_X86_PUSH_IMM;
12577 case OP_X86_COMPARE_MEMBASE_REG:
12578 return OP_X86_COMPARE_MEMBASE_IMM;
12580 #if defined(TARGET_AMD64)
12581 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12582 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a register whose target is constant becomes a direct call. */
12584 case OP_VOIDCALL_REG:
12585 return OP_VOIDCALL;
12593 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * typed OP_LOAD*_MEMBASE IR opcode.
 */
12600 ldind_to_load_membase (int opcode)
12604 return OP_LOADI1_MEMBASE;
12606 return OP_LOADU1_MEMBASE;
12608 return OP_LOADI2_MEMBASE;
12610 return OP_LOADU2_MEMBASE;
12612 return OP_LOADI4_MEMBASE;
12614 return OP_LOADU4_MEMBASE;
12616 return OP_LOAD_MEMBASE;
/* Object references load with the native-pointer-sized opcode. */
12617 case CEE_LDIND_REF:
12618 return OP_LOAD_MEMBASE;
12620 return OP_LOADI8_MEMBASE;
12622 return OP_LOADR4_MEMBASE;
12624 return OP_LOADR8_MEMBASE;
/* Any other opcode is a caller bug. */
12626 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * typed OP_STORE*_MEMBASE_REG IR opcode.
 */
12633 stind_to_store_membase (int opcode)
12637 return OP_STOREI1_MEMBASE_REG;
12639 return OP_STOREI2_MEMBASE_REG;
12641 return OP_STOREI4_MEMBASE_REG;
/* Object references store with the native-pointer-sized opcode. */
12643 case CEE_STIND_REF:
12644 return OP_STORE_MEMBASE_REG;
12646 return OP_STOREI8_MEMBASE_REG;
12648 return OP_STORER4_MEMBASE_REG;
12650 return OP_STORER8_MEMBASE_REG;
/* Any other opcode is a caller bug. */
12652 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + offset) opcode to the
 * absolute-address OP_LOAD*_MEM form. Only x86/amd64 provide these.
 */
12659 mono_load_membase_to_load_mem (int opcode)
12661 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12662 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12664 case OP_LOAD_MEMBASE:
12665 return OP_LOAD_MEM;
12666 case OP_LOADU1_MEMBASE:
12667 return OP_LOADU1_MEM;
12668 case OP_LOADU2_MEMBASE:
12669 return OP_LOADU2_MEM;
12670 case OP_LOADI4_MEMBASE:
12671 return OP_LOADI4_MEM;
12672 case OP_LOADU4_MEMBASE:
12673 return OP_LOADU4_MEM;
/* 64 bit absolute loads only exist when registers are 64 bit wide. */
12674 #if SIZEOF_REGISTER == 8
12675 case OP_LOADI8_MEMBASE:
12676 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given the store opcode used to spill a variable and an ALU opcode
 * writing to it, return the x86/amd64 read-modify-write *_MEMBASE opcode
 * that fuses the operation with the store, avoiding a load+op+store
 * sequence. The store opcode gates which operand widths are legal.
 */
12685 op_to_op_dest_membase (int store_opcode, int opcode)
12687 #if defined(TARGET_X86)
/* On x86 only pointer-sized / 32 bit stores can be fused. */
12688 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12693 return OP_X86_ADD_MEMBASE_REG;
12695 return OP_X86_SUB_MEMBASE_REG;
12697 return OP_X86_AND_MEMBASE_REG;
12699 return OP_X86_OR_MEMBASE_REG;
12701 return OP_X86_XOR_MEMBASE_REG;
12704 return OP_X86_ADD_MEMBASE_IMM;
12707 return OP_X86_SUB_MEMBASE_IMM;
12710 return OP_X86_AND_MEMBASE_IMM;
12713 return OP_X86_OR_MEMBASE_IMM;
12716 return OP_X86_XOR_MEMBASE_IMM;
12722 #if defined(TARGET_AMD64)
/* amd64 additionally allows 64 bit stores to be fused. */
12723 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
12728 return OP_X86_ADD_MEMBASE_REG;
12730 return OP_X86_SUB_MEMBASE_REG;
12732 return OP_X86_AND_MEMBASE_REG;
12734 return OP_X86_OR_MEMBASE_REG;
12736 return OP_X86_XOR_MEMBASE_REG;
12738 return OP_X86_ADD_MEMBASE_IMM;
12740 return OP_X86_SUB_MEMBASE_IMM;
12742 return OP_X86_AND_MEMBASE_IMM;
12744 return OP_X86_OR_MEMBASE_IMM;
12746 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit (long) variants of the same fusions. */
12748 return OP_AMD64_ADD_MEMBASE_REG;
12750 return OP_AMD64_SUB_MEMBASE_REG;
12752 return OP_AMD64_AND_MEMBASE_REG;
12754 return OP_AMD64_OR_MEMBASE_REG;
12756 return OP_AMD64_XOR_MEMBASE_REG;
12759 return OP_AMD64_ADD_MEMBASE_IMM;
12762 return OP_AMD64_SUB_MEMBASE_IMM;
12765 return OP_AMD64_AND_MEMBASE_IMM;
12768 return OP_AMD64_OR_MEMBASE_IMM;
12771 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store spilling it, using the
 * x86 SETcc-to-memory forms. Only valid for 1 byte stores.
 */
12781 op_to_op_store_membase (int store_opcode, int opcode)
12783 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12786 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12787 return OP_X86_SETEQ_MEMBASE;
12789 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12790 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given the load opcode that would reload a spilled variable and an
 * opcode consuming it as its FIRST source, return the x86/amd64 opcode
 * reading that operand directly from memory, so the explicit load can be
 * elided. The load opcode gates which operand widths are legal.
 */
12798 op_to_op_src1_membase (int load_opcode, int opcode)
12801 /* FIXME: This has sign extension issues */
12803 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12804 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32 bit loads can be folded. */
12807 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12812 return OP_X86_PUSH_MEMBASE;
12813 case OP_COMPARE_IMM:
12814 case OP_ICOMPARE_IMM:
12815 return OP_X86_COMPARE_MEMBASE_IMM;
12818 return OP_X86_COMPARE_MEMBASE_REG;
12822 #ifdef TARGET_AMD64
12823 /* FIXME: This has sign extension issues */
12825 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12826 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32), OP_LOAD_MEMBASE is 4 bytes while I8 loads stay 8. */
12831 #ifdef __mono_ilp32__
12832 if (load_opcode == OP_LOADI8_MEMBASE)
12834 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12836 return OP_X86_PUSH_MEMBASE;
12838 /* FIXME: This only works for 32 bit immediates
12839 case OP_COMPARE_IMM:
12840 case OP_LCOMPARE_IMM:
12841 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12842 return OP_AMD64_COMPARE_MEMBASE_IMM;
12844 case OP_ICOMPARE_IMM:
12845 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12846 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12850 #ifdef __mono_ilp32__
12851 if (load_opcode == OP_LOAD_MEMBASE)
12852 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12853 if (load_opcode == OP_LOADI8_MEMBASE)
12855 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12857 return OP_AMD64_COMPARE_MEMBASE_REG;
12860 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12861 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same as op_to_op_src1_membase, but for an opcode consuming the spilled
 * variable as its SECOND source operand (the reg-membase instruction forms).
 */
12870 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32 bit loads can be folded. */
12873 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12879 return OP_X86_COMPARE_REG_MEMBASE;
12881 return OP_X86_ADD_REG_MEMBASE;
12883 return OP_X86_SUB_REG_MEMBASE;
12885 return OP_X86_AND_REG_MEMBASE;
12887 return OP_X86_OR_REG_MEMBASE;
12889 return OP_X86_XOR_REG_MEMBASE;
12893 #ifdef TARGET_AMD64
/* Under ILP32 (x32), OP_LOAD_MEMBASE counts as a 32 bit load. */
12894 #ifdef __mono_ilp32__
12895 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12897 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12901 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12903 return OP_X86_ADD_REG_MEMBASE;
12905 return OP_X86_SUB_REG_MEMBASE;
12907 return OP_X86_AND_REG_MEMBASE;
12909 return OP_X86_OR_REG_MEMBASE;
12911 return OP_X86_XOR_REG_MEMBASE;
12913 #ifdef __mono_ilp32__
12914 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12916 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit (long) variants. */
12921 return OP_AMD64_COMPARE_REG_MEMBASE;
12923 return OP_AMD64_ADD_REG_MEMBASE;
12925 return OP_AMD64_SUB_REG_MEMBASE;
12927 return OP_AMD64_AND_REG_MEMBASE;
12929 return OP_AMD64_OR_REG_MEMBASE;
12931 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes whose
 * immediate form would be software-emulated on this architecture (long
 * shifts on 32 bit registers, emulated mul/div/rem), since emulation would
 * defeat the purpose of using an immediate variant.
 */
12940 mono_op_to_op_imm_noemul (int opcode)
12943 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12949 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12956 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else can safely use the plain immediate mapping. */
12961 return mono_op_to_op_imm (opcode);
12966 * mono_handle_global_vregs:
12968 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure: (1) scan every instruction in every bblock recording, per
 * vreg, which bblock uses it; vregs seen in two different bblocks get a
 * MonoInst variable allocated for them. (2) Variables confined to one bblock
 * are converted back to local vregs. (3) The varinfo/vars tables are then
 * compacted to drop dead entries.
 */
12972 mono_handle_global_vregs (MonoCompile *cfg)
12974 gint32 *vreg_to_bb;
12975 MonoBasicBlock *bb;
/*
 * NOTE(review): the element type is gint32 but sizeof (gint32*) is used, and
 * the "+ 1" is outside the multiplication — presumably meant
 * sizeof (gint32) * (cfg->next_vreg + 1). Benign (over-allocates) on 64 bit,
 * but worth confirming and fixing upstream.
 */
12978 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12980 #ifdef MONO_ARCH_SIMD_INTRINSICS
12981 if (cfg->uses_simd_intrinsics)
12982 mono_simd_simplify_indirection (cfg);
12985 /* Find local vregs used in more than one bb */
12986 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12987 MonoInst *ins = bb->code;
12988 int block_num = bb->block_num;
12990 if (cfg->verbose_level > 2)
12991 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12994 for (; ins; ins = ins->next) {
12995 const char *spec = INS_INFO (ins->opcode);
12996 int regtype = 0, regindex;
12999 if (G_UNLIKELY (cfg->verbose_level > 2))
13000 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR. */
13002 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dest, src1, src2, src3 of the instruction in turn. */
13004 for (regindex = 0; regindex < 4; regindex ++) {
13007 if (regindex == 0) {
13008 regtype = spec [MONO_INST_DEST];
13009 if (regtype == ' ')
13012 } else if (regindex == 1) {
13013 regtype = spec [MONO_INST_SRC1];
13014 if (regtype == ' ')
13017 } else if (regindex == 2) {
13018 regtype = spec [MONO_INST_SRC2];
13019 if (regtype == ' ')
13022 } else if (regindex == 3) {
13023 regtype = spec [MONO_INST_SRC3];
13024 if (regtype == ' ')
13029 #if SIZEOF_REGISTER == 4
13030 /* In the LLVM case, the long opcodes are not decomposed */
13031 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13033 * Since some instructions reference the original long vreg,
13034 * and some reference the two component vregs, it is quite hard
13035 * to determine when it needs to be global. So be conservative.
13037 if (!get_vreg_to_inst (cfg, vreg)) {
13038 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13040 if (cfg->verbose_level > 2)
13041 printf ("LONG VREG R%d made global.\n", vreg);
13045 * Make the component vregs volatile since the optimizations can
13046 * get confused otherwise.
/* vreg + 1 / vreg + 2 are the low/high 32 bit halves of the long. */
13048 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13049 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13053 g_assert (vreg != -1);
/*
 * vreg_to_bb encoding: 0 = not seen yet, block_num + 1 = seen only in that
 * bblock, -1 = seen in more than one bblock (already made global).
 */
13055 prev_bb = vreg_to_bb [vreg];
13056 if (prev_bb == 0) {
13057 /* 0 is a valid block num */
13058 vreg_to_bb [vreg] = block_num + 1;
13059 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are global by construction; skip them. */
13060 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13063 if (!get_vreg_to_inst (cfg, vreg)) {
13064 if (G_UNLIKELY (cfg->verbose_level > 2))
/* NOTE(review): vreg_to_bb [vreg] holds block_num + 1, so the first BB number printed is off by one. */
13065 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the reg type (ref, int, long, float, vtype). */
13069 if (vreg_is_ref (cfg, vreg))
13070 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13072 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13075 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13078 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13084 g_assert_not_reached ();
13088 /* Flag as having been used in more than one bb */
13089 vreg_to_bb [vreg] = -1;
13095 /* If a variable is used in only one bblock, convert it into a local vreg */
13096 for (i = 0; i < cfg->num_varinfo; i++) {
13097 MonoInst *var = cfg->varinfo [i];
13098 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13100 switch (var->type) {
13106 #if SIZEOF_REGISTER == 8
13109 #if !defined(TARGET_X86)
13110 /* Enabling this screws up the fp stack on x86 */
13113 if (mono_arch_is_soft_float ())
13116 /* Arguments are implicitly global */
13117 /* Putting R4 vars into registers doesn't work currently */
13118 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13119 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13121 * Make that the variable's liveness interval doesn't contain a call, since
13122 * that would cause the lvreg to be spilled, making the whole optimization
13125 /* This is too slow for JIT compilation */
/*
 * NOTE(review): this scan dereferences vreg_to_bb [...] as a bblock pointer
 * although the array is declared gint32* — in the upstream tree this region
 * is compiled out (#if 0, elided from this view); confirm before enabling.
 */
13127 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13129 int def_index, call_index, ins_index;
13130 gboolean spilled = FALSE;
13135 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13136 const char *spec = INS_INFO (ins->opcode);
13138 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13139 def_index = ins_index;
/*
 * NOTE(review): both arms of the || test SRC1/sreg1 — the second arm
 * presumably was meant to test SRC2/sreg2. Copy-paste defect; confirm
 * against upstream before relying on this check.
 */
13141 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13142 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13143 if (call_index > def_index) {
13149 if (MONO_IS_CALL (ins))
13150 call_index = ins_index;
13160 if (G_UNLIKELY (cfg->verbose_level > 2))
13161 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Kill the variable; the vreg stays purely local from now on. */
13162 var->flags |= MONO_INST_IS_DEAD;
13163 cfg->vreg_to_inst [var->dreg] = NULL;
13170 * Compress the varinfo and vars tables so the liveness computation is faster and
13171 * takes up less space.
13174 for (i = 0; i < cfg->num_varinfo; ++i) {
13175 MonoInst *var = cfg->varinfo [i];
13176 if (pos < i && cfg->locals_start == i)
13177 cfg->locals_start = pos;
13178 if (!(var->flags & MONO_INST_IS_DEAD)) {
13180 cfg->varinfo [pos] = cfg->varinfo [i];
13181 cfg->varinfo [pos]->inst_c0 = pos;
13182 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13183 cfg->vars [pos].idx = pos;
13184 #if SIZEOF_REGISTER == 4
13185 if (cfg->varinfo [pos]->type == STACK_I8) {
13186 /* Modify the two component vars too */
13189 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13190 var1->inst_c0 = pos;
13191 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13192 var1->inst_c0 = pos;
13199 cfg->num_varinfo = pos;
13200 if (cfg->locals_start > cfg->num_varinfo)
13201 cfg->locals_start = cfg->num_varinfo;
13205 * mono_spill_global_vars:
13207 * Generate spill code for variables which are not allocated to registers,
13208 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13209 * code is generated which could be optimized by the local optimization passes.
/*
 * Pass structure: per-bblock, for every instruction, (1) rewrite LDADDR into
 * address arithmetic now that variable locations are known, (2) spill the
 * destination vreg to its stack slot (fusing the store into the instruction
 * when a *_MEMBASE form exists), (3) reload source vregs (fusing loads the
 * same way, and caching reloads in "lvregs" so a variable is not reloaded
 * twice between calls). It also records instruction-precise live ranges for
 * debug info / GC maps.
 */
13212 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13214 MonoBasicBlock *bb;
13216 int orig_next_vreg;
13217 guint32 *vreg_to_lvreg;
13219 guint32 i, lvregs_len;
13220 gboolean dest_has_lvreg = FALSE;
13221 guint32 stacktypes [128];
13222 MonoInst **live_range_start, **live_range_end;
13223 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13224 int *gsharedvt_vreg_to_idx = NULL;
13226 *need_local_opts = FALSE;
13228 memset (spec2, 0, sizeof (spec2));
13230 /* FIXME: Move this function to mini.c */
/* Map ins spec reg-type characters to stack types for alloc_dreg (). */
13231 stacktypes ['i'] = STACK_PTR;
13232 stacktypes ['l'] = STACK_I8;
13233 stacktypes ['f'] = STACK_R8;
13234 #ifdef MONO_ARCH_SIMD_INTRINSICS
13235 stacktypes ['x'] = STACK_VTYPE;
13238 #if SIZEOF_REGISTER == 4
13239 /* Create MonoInsts for longs */
13240 for (i = 0; i < cfg->num_varinfo; i++) {
13241 MonoInst *ins = cfg->varinfo [i];
13243 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13244 switch (ins->type) {
13249 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13252 g_assert (ins->opcode == OP_REGOFFSET);
/* dreg + 1 / dreg + 2 are the low/high word component vregs of the long. */
13254 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13256 tree->opcode = OP_REGOFFSET;
13257 tree->inst_basereg = ins->inst_basereg;
13258 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13260 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13262 tree->opcode = OP_REGOFFSET;
13263 tree->inst_basereg = ins->inst_basereg;
13264 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13274 if (cfg->compute_gc_maps) {
13275 /* registers need liveness info even for !non refs */
13276 for (i = 0; i < cfg->num_varinfo; i++) {
13277 MonoInst *ins = cfg->varinfo [i];
13279 if (ins->opcode == OP_REGVAR)
13280 ins->flags |= MONO_INST_GC_TRACK;
13284 if (cfg->gsharedvt) {
/*
 * gsharedvt_vreg_to_idx encoding: 0 = not a gsharedvt variable,
 * idx + 1 = gsharedvt local (runtime-computed offset), -1 = gsharedvt
 * argument passed by reference.
 */
13285 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13287 for (i = 0; i < cfg->num_varinfo; ++i) {
13288 MonoInst *ins = cfg->varinfo [i];
13291 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13292 if (i >= cfg->locals_start) {
13294 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13295 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13296 ins->opcode = OP_GSHAREDVT_LOCAL;
13297 ins->inst_imm = idx;
13300 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13301 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13307 /* FIXME: widening and truncation */
13310 * As an optimization, when a variable allocated to the stack is first loaded into
13311 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13312 * the variable again.
13314 orig_next_vreg = cfg->next_vreg;
13315 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity list of cached vregs; capacity is asserted below. */
13316 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13320 * These arrays contain the first and last instructions accessing a given
13322 * Since we emit bblocks in the same order we process them here, and we
13323 * don't split live ranges, these will precisely describe the live range of
13324 * the variable, i.e. the instruction range where a valid value can be found
13325 * in the variables location.
13326 * The live range is computed using the liveness info computed by the liveness pass.
13327 * We can't use vmv->range, since that is an abstract live range, and we need
13328 * one which is instruction precise.
13329 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13331 /* FIXME: Only do this if debugging info is requested */
13332 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13333 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13334 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13335 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13337 /* Add spill loads/stores */
13338 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13341 if (cfg->verbose_level > 2)
13342 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13344 /* Clear vreg_to_lvreg array */
/* Cached reloads are only valid within one bblock. */
13345 for (i = 0; i < lvregs_len; i++)
13346 vreg_to_lvreg [lvregs [i]] = 0;
13350 MONO_BB_FOR_EACH_INS (bb, ins) {
13351 const char *spec = INS_INFO (ins->opcode);
13352 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13353 gboolean store, no_lvreg;
13354 int sregs [MONO_MAX_SRC_REGS];
13356 if (G_UNLIKELY (cfg->verbose_level > 2))
13357 mono_print_ins (ins);
13359 if (ins->opcode == OP_NOP)
13363 * We handle LDADDR here as well, since it can only be decomposed
13364 * when variable addresses are known.
13366 if (ins->opcode == OP_LDADDR) {
13367 MonoInst *var = ins->inst_p0;
13369 if (var->opcode == OP_VTARG_ADDR) {
13370 /* Happens on SPARC/S390 where vtypes are passed by reference */
13371 MonoInst *vtaddr = var->inst_left;
13372 if (vtaddr->opcode == OP_REGVAR) {
13373 ins->opcode = OP_MOVE;
13374 ins->sreg1 = vtaddr->dreg;
/* var->inst_left is the same node as vtaddr above. */
13376 else if (var->inst_left->opcode == OP_REGOFFSET) {
13377 ins->opcode = OP_LOAD_MEMBASE;
13378 ins->inst_basereg = vtaddr->inst_basereg;
13379 ins->inst_offset = vtaddr->inst_offset;
13382 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13383 /* gsharedvt arg passed by ref */
13384 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13386 ins->opcode = OP_LOAD_MEMBASE;
13387 ins->inst_basereg = var->inst_basereg;
13388 ins->inst_offset = var->inst_offset;
13389 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13390 MonoInst *load, *load2, *load3;
13391 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13392 int reg1, reg2, reg3;
13393 MonoInst *info_var = cfg->gsharedvt_info_var;
13394 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13398 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13401 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13403 g_assert (info_var);
13404 g_assert (locals_var);
13406 /* Mark the instruction used to compute the locals var as used */
13407 cfg->gsharedvt_locals_var_ins = NULL;
13409 /* Load the offset */
13410 if (info_var->opcode == OP_REGOFFSET) {
13411 reg1 = alloc_ireg (cfg);
13412 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13413 } else if (info_var->opcode == OP_REGVAR) {
13415 reg1 = info_var->dreg;
13417 g_assert_not_reached ();
13419 reg2 = alloc_ireg (cfg);
13420 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13421 /* Load the locals area address */
13422 reg3 = alloc_ireg (cfg);
13423 if (locals_var->opcode == OP_REGOFFSET) {
13424 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13425 } else if (locals_var->opcode == OP_REGVAR) {
13426 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13428 g_assert_not_reached ();
13430 /* Compute the address */
13431 ins->opcode = OP_PADD;
/* Insert the three loads in reverse so they execute load, load2, load3, ins. */
13435 mono_bblock_insert_before_ins (bb, ins, load3);
13436 mono_bblock_insert_before_ins (bb, load3, load2);
13438 mono_bblock_insert_before_ins (bb, load2, load);
13440 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is basereg + offset. */
13442 ins->opcode = OP_ADD_IMM;
13443 ins->sreg1 = var->inst_basereg;
13444 ins->inst_imm = var->inst_offset;
13447 *need_local_opts = TRUE;
13448 spec = INS_INFO (ins->opcode);
/* No CIL opcodes may survive to this point. */
13451 if (ins->opcode < MONO_CEE_LAST) {
13452 mono_print_ins (ins);
13453 g_assert_not_reached ();
13457 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ... a source; swap dreg/sreg2 and build a matching spec2 so the generic
 * dest/src handling below treats the base register as a source. The swap is
 * undone after the instruction has been processed. */
13461 if (MONO_IS_STORE_MEMBASE (ins)) {
13462 tmp_reg = ins->dreg;
13463 ins->dreg = ins->sreg2;
13464 ins->sreg2 = tmp_reg;
13467 spec2 [MONO_INST_DEST] = ' ';
13468 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13469 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13470 spec2 [MONO_INST_SRC3] = ' ';
13472 } else if (MONO_IS_STORE_MEMINDEX (ins))
13473 g_assert_not_reached ();
13478 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13479 printf ("\t %.3s %d", spec, ins->dreg);
13480 num_sregs = mono_inst_get_src_registers (ins, sregs);
13481 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13482 printf (" %d", sregs [srcindex]);
/* ---- Phase: spill the destination vreg, if it maps to a variable ---- */
13489 regtype = spec [MONO_INST_DEST];
13490 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13493 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13494 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13495 MonoInst *store_ins;
13497 MonoInst *def_ins = ins;
13498 int dreg = ins->dreg; /* The original vreg */
13500 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13502 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just rename the dreg. */
13503 ins->dreg = var->dreg;
13504 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13506 * Instead of emitting a load+store, use a _membase opcode.
13508 g_assert (var->opcode == OP_REGOFFSET);
13509 if (ins->opcode == OP_MOVE) {
13513 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13514 ins->inst_basereg = var->inst_basereg;
13515 ins->inst_offset = var->inst_offset;
13518 spec = INS_INFO (ins->opcode);
13522 g_assert (var->opcode == OP_REGOFFSET);
13524 prev_dreg = ins->dreg;
13526 /* Invalidate any previous lvreg for this vreg */
13527 vreg_to_lvreg [ins->dreg] = 0;
13531 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft float: R8 values are moved through integer registers. */
13533 store_opcode = OP_STOREI8_MEMBASE_REG;
13536 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13538 #if SIZEOF_REGISTER != 8
13539 if (regtype == 'l') {
/* 32 bit: store the long as two 4 byte halves (dreg+1 = low, dreg+2 = high). */
13540 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13541 mono_bblock_insert_after_ins (bb, ins, store_ins);
13542 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13543 mono_bblock_insert_after_ins (bb, ins, store_ins);
13544 def_ins = store_ins;
13549 g_assert (store_opcode != OP_STOREV_MEMBASE);
13551 /* Try to fuse the store into the instruction itself */
13552 /* FIXME: Add more instructions */
13553 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13554 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13555 ins->inst_imm = ins->inst_c0;
13556 ins->inst_destbasereg = var->inst_basereg;
13557 ins->inst_offset = var->inst_offset;
13558 spec = INS_INFO (ins->opcode);
13559 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a spilled variable becomes a direct store of its source. */
13560 ins->opcode = store_opcode;
13561 ins->inst_destbasereg = var->inst_basereg;
13562 ins->inst_offset = var->inst_offset;
/* The instruction is now itself a store: apply the dreg/sreg2 swap + spec2. */
13566 tmp_reg = ins->dreg;
13567 ins->dreg = ins->sreg2;
13568 ins->sreg2 = tmp_reg;
13571 spec2 [MONO_INST_DEST] = ' ';
13572 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13573 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13574 spec2 [MONO_INST_SRC3] = ' ';
13576 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13577 // FIXME: The backends expect the base reg to be in inst_basereg
13578 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13580 ins->inst_basereg = var->inst_basereg;
13581 ins->inst_offset = var->inst_offset;
13582 spec = INS_INFO (ins->opcode);
13584 /* printf ("INS: "); mono_print_ins (ins); */
13585 /* Create a store instruction */
13586 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13588 /* Insert it after the instruction */
13589 mono_bblock_insert_after_ins (bb, ins, store_ins);
13591 def_ins = store_ins;
13594 * We can't assign ins->dreg to var->dreg here, since the
13595 * sregs could use it. So set a flag, and do it after
13598 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13599 dest_has_lvreg = TRUE;
/* Record the defining instruction for the live-range tables. */
13604 if (def_ins && !live_range_start [dreg]) {
13605 live_range_start [dreg] = def_ins;
13606 live_range_start_bb [dreg] = bb;
13609 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13612 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13613 tmp->inst_c1 = dreg;
13614 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* ---- Phase: reload the source vregs that map to variables ---- */
13621 num_sregs = mono_inst_get_src_registers (ins, sregs);
13622 for (srcindex = 0; srcindex < 3; ++srcindex) {
13623 regtype = spec [MONO_INST_SRC1 + srcindex];
13624 sreg = sregs [srcindex];
13626 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13627 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13628 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13629 MonoInst *use_ins = ins;
13630 MonoInst *load_ins;
13631 guint32 load_opcode;
13633 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: rename the sreg. */
13634 sregs [srcindex] = var->dreg;
13635 //mono_inst_set_src_registers (ins, sregs);
13636 live_range_end [sreg] = use_ins;
13637 live_range_end_bb [sreg] = bb;
13639 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13642 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13643 /* var->dreg is a hreg */
13644 tmp->inst_c1 = sreg;
13645 mono_bblock_insert_after_ins (bb, ins, tmp);
13651 g_assert (var->opcode == OP_REGOFFSET);
13653 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13655 g_assert (load_opcode != OP_LOADV_MEMBASE);
13657 if (vreg_to_lvreg [sreg]) {
13658 g_assert (vreg_to_lvreg [sreg] != -1);
13660 /* The variable is already loaded to an lvreg */
13661 if (G_UNLIKELY (cfg->verbose_level > 2))
13662 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13663 sregs [srcindex] = vreg_to_lvreg [sreg];
13664 //mono_inst_set_src_registers (ins, sregs);
13668 /* Try to fuse the load into the instruction */
13669 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13670 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13671 sregs [0] = var->inst_basereg;
13672 //mono_inst_set_src_registers (ins, sregs);
13673 ins->inst_offset = var->inst_offset;
13674 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13675 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13676 sregs [1] = var->inst_basereg;
13677 //mono_inst_set_src_registers (ins, sregs);
13678 ins->inst_offset = var->inst_offset;
13680 if (MONO_IS_REAL_MOVE (ins)) {
13681 ins->opcode = OP_NOP;
13684 //printf ("%d ", srcindex); mono_print_ins (ins);
13686 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the reload unless FP-stack/volatile/indirect rules forbid it. */
13688 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13689 if (var->dreg == prev_dreg) {
13691 * sreg refers to the value loaded by the load
13692 * emitted below, but we need to use ins->dreg
13693 * since it refers to the store emitted earlier.
13697 g_assert (sreg != -1);
13698 vreg_to_lvreg [var->dreg] = sreg;
13699 g_assert (lvregs_len < 1024);
13700 lvregs [lvregs_len ++] = var->dreg;
13704 sregs [srcindex] = sreg;
13705 //mono_inst_set_src_registers (ins, sregs);
13707 #if SIZEOF_REGISTER != 8
13708 if (regtype == 'l') {
/* 32 bit: reload the long as two 4 byte halves (sreg+1 = low, sreg+2 = high). */
13709 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13710 mono_bblock_insert_before_ins (bb, ins, load_ins);
13711 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13712 mono_bblock_insert_before_ins (bb, ins, load_ins);
13713 use_ins = load_ins;
13718 #if SIZEOF_REGISTER == 4
13719 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13721 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13722 mono_bblock_insert_before_ins (bb, ins, load_ins);
13723 use_ins = load_ins;
13727 if (var->dreg < orig_next_vreg) {
13728 live_range_end [var->dreg] = use_ins;
13729 live_range_end_bb [var->dreg] = bb;
13732 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13735 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13736 tmp->inst_c1 = var->dreg;
13737 mono_bblock_insert_after_ins (bb, ins, tmp);
13741 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dest phase: now that sregs are processed, cache the
 * dreg the variable's value was stored from. */
13743 if (dest_has_lvreg) {
13744 g_assert (ins->dreg != -1);
13745 vreg_to_lvreg [prev_dreg] = ins->dreg;
13746 g_assert (lvregs_len < 1024);
13747 lvregs [lvregs_len ++] = prev_dreg;
13748 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap applied to store opcodes above. */
13752 tmp_reg = ins->dreg;
13753 ins->dreg = ins->sreg2;
13754 ins->sreg2 = tmp_reg;
13757 if (MONO_IS_CALL (ins)) {
13758 /* Clear vreg_to_lvreg array */
/* A call may clobber any lvreg, so drop all cached reloads. */
13759 for (i = 0; i < lvregs_len; i++)
13760 vreg_to_lvreg [lvregs [i]] = 0;
13762 } else if (ins->opcode == OP_NOP) {
13764 MONO_INST_NULLIFY_SREGS (ins);
13767 if (cfg->verbose_level > 2)
13768 mono_print_ins_index (1, ins);
13771 /* Extend the live range based on the liveness info */
13772 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13773 for (i = 0; i < cfg->num_varinfo; i ++) {
13774 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13776 if (vreg_is_volatile (cfg, vi->vreg))
13777 /* The liveness info is incomplete */
13780 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13781 /* Live from at least the first ins of this bb */
13782 live_range_start [vi->vreg] = bb->code;
13783 live_range_start_bb [vi->vreg] = bb;
13786 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13787 /* Live at least until the last ins of this bb */
13788 live_range_end [vi->vreg] = bb->last_ins;
13789 live_range_end_bb [vi->vreg] = bb;
13795 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13797 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13798 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13800 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13801 for (i = 0; i < cfg->num_varinfo; ++i) {
13802 int vreg = MONO_VARINFO (cfg, i)->vreg;
13805 if (live_range_start [vreg]) {
13806 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13808 ins->inst_c1 = vreg;
13809 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13811 if (live_range_end [vreg]) {
13812 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13814 ins->inst_c1 = vreg;
13815 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13816 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13818 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13824 if (cfg->gsharedvt_locals_var_ins) {
13825 /* Nullify if unused */
13826 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13827 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* The live-range scratch arrays were g_new0'd above; release them. */
13830 g_free (live_range_start);
13831 g_free (live_range_end);
13832 g_free (live_range_start_bb);
13833 g_free (live_range_end_bb);
13838 * - use 'iadd' instead of 'int_add'
13839 * - handling ovf opcodes: decompose in method_to_ir.
13840 * - unify iregs/fregs
13841 * -> partly done, the missing parts are:
13842 * - a more complete unification would involve unifying the hregs as well, so
13843 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13844 * would no longer map to the machine hregs, so the code generators would need to
13845 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13846 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13847 * fp/non-fp branches speeds it up by about 15%.
13848 * - use sext/zext opcodes instead of shifts
13850 * - get rid of TEMPLOADs if possible and use vregs instead
13851 * - clean up usage of OP_P/OP_ opcodes
13852 * - cleanup usage of DUMMY_USE
13853 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13855 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13856 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13857 * - make sure handle_stack_args () is called before the branch is emitted
13858 * - when the new IR is done, get rid of all unused stuff
13859 * - COMPARE/BEQ as separate instructions or unify them ?
13860 * - keeping them separate allows specialized compare instructions like
13861 * compare_imm, compare_membase
13862 * - most back ends unify fp compare+branch, fp compare+ceq
13863 * - integrate mono_save_args into inline_method
13864 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13865 * - handle long shift opts on 32 bit platforms somehow: they require
13866 * 3 sregs (2 for arg1 and 1 for arg2)
13867 * - make byref a 'normal' type.
13868 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13869 * variable if needed.
13870 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13871 * like inline_method.
13872 * - remove inlining restrictions
13873 * - fix LNEG and enable cfold of INEG
13874 * - generalize x86 optimizations like ldelema as a peephole optimization
13875 * - add store_mem_imm for amd64
13876 * - optimize the loading of the interruption flag in the managed->native wrappers
13877 * - avoid special handling of OP_NOP in passes
13878 * - move code inserting instructions into one function/macro.
13879 * - try a coalescing phase after liveness analysis
13880 * - add float -> vreg conversion + local optimizations on !x86
13881 * - figure out how to handle decomposed branches during optimizations, ie.
13882 * compare+branch, op_jump_table+op_br etc.
13883 * - promote RuntimeXHandles to vregs
13884 * - vtype cleanups:
13885 * - add a NEW_VARLOADA_VREG macro
13886 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13887 * accessing vtype fields.
13888 * - get rid of I8CONST on 64 bit platforms
13889 * - dealing with the increase in code size due to branches created during opcode
13891 * - use extended basic blocks
13892 * - all parts of the JIT
13893 * - handle_global_vregs () && local regalloc
13894 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13895 * - sources of increase in code size:
13898 * - isinst and castclass
13899 * - lvregs not allocated to global registers even if used multiple times
13900 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13902 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13903 * - add all micro optimizations from the old JIT
13904 * - put tree optimizations into the deadce pass
13905 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13906 * specific function.
13907 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13908 * fcompare + branchCC.
13909 * - create a helper function for allocating a stack slot, taking into account
13910 * MONO_CFG_HAS_SPILLUP.
13912 * - merge the ia64 switch changes.
13913 * - optimize mono_regstate2_alloc_int/float.
13914 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13915 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13916 * parts of the tree could be separated by other instructions, killing the tree
13917 * arguments, or stores killing loads etc. Also, should we fold loads into other
13918 * instructions if the result of the load is used multiple times ?
13919 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13920 * - LAST MERGE: 108395.
13921 * - when returning vtypes in registers, generate IR and append it to the end of the
13922 * last bb instead of doing it in the epilog.
13923 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13931 - When to decompose opcodes:
13932 - earlier: this makes some optimizations hard to implement, since the low level IR
13933 no longer contains the necessary information. But it is easier to do.
13934 - later: harder to implement, enables more optimizations.
13935 - Branches inside bblocks:
13936 - created when decomposing complex opcodes.
13937 - branches to another bblock: harmless, but not tracked by the branch
13938 optimizations, so need to branch to a label at the start of the bblock.
13939 - branches to inside the same bblock: very problematic, trips up the local
13940 reg allocator. Can be fixed by splitting the current bblock, but that is a
13941 complex operation, since some local vregs can become global vregs etc.
13942 - Local/global vregs:
13943 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13944 local register allocator.
13945 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13946 structure, created by mono_create_var (). Assigned to hregs or the stack by
13947 the global register allocator.
13948 - When to do optimizations like alu->alu_imm:
13949 - earlier -> saves work later on since the IR will be smaller/simpler
13950 - later -> can work on more instructions
13951 - Handling of valuetypes:
13952 - When a vtype is pushed on the stack, a new temporary is created, an
13953 instruction computing its address (LDADDR) is emitted and pushed on
13954 the stack. Need to optimize cases when the vtype is used immediately as in
13955 argument passing, stloc etc.
13956 - Instead of the to_end stuff in the old JIT, simply call the function handling
13957 the values on the stack before emitting the last instruction of the bb.
13960 #endif /* DISABLE_JIT */