2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/*
 * NOTE(review): this file appears to be a subsampled extraction — each line
 * carries a stray leading number and many original lines are missing (the
 * macros below lack their closing "} while (0)" lines). Comments document
 * intent only; do not assume these fragments compile as-is.
 */
/* Estimated cost of a branch, used when weighing inlining decisions -- TODO confirm unit. */
72 #define BRANCH_COST 10
/* Size limit above which a method is not considered for inlining -- presumably IL bytes; verify. */
73 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining the current callee: log at verbosity >= 2, then jump to the inline_failure label. */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* Bail out of compilation if a previous step already recorded an exception on cfg. */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException (with both method names in the message) and exit via exception_exit. */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* Record a FieldAccessException analogously and exit via exception_exit. */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/* When compiling under generic sharing, mark the attempt failed for this opcode and exit. */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* Same idea for gsharedvt compilation: record the failing opcode/location and exit. */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* Record an OutOfMemoryException on cfg and exit. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
/* Disable ahead-of-time compilation for this method, logging the call site. */
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
130 /* Determine whether 'ins' represents a load of the 'this' argument */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in the file. */
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
/* Cached trampoline signatures; all are filled in by mono_create_helper_signatures () below. */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
/*
 * Per-opcode metadata generated by expanding mini-ops.h twice with different
 * MINI_OP/MINI_OP3 definitions: first into dreg/sreg descriptor characters,
 * then into source-register counts. Several surrounding lines are missing in
 * this extraction (the first table's declaration is not visible).
 */
151 * Instruction metadata
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
/* Second expansion: number of source registers actually used by each opcode. */
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
/* Initialize a MonoMethodVar: 0xffff marks the first-use position as "not yet seen". */
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the first three entries of 'regs' into the instruction's source registers. */
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
/* Public wrappers over the internal virtual-register allocators (alloc_*reg). */
/* Allocate a fresh integer vreg. */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
/* Allocate a fresh floating-point vreg. */
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg. */
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
/*
 * Map a MonoType to the move opcode used when copying it between vregs.
 * Enums and generic instances are unwrapped to their underlying type first.
 * Many case labels and return statements are missing from this extraction,
 * so the per-case results are not visible here.
 */
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_replace_type (type);
275 switch (type->type) {
278 case MONO_TYPE_BOOLEAN:
290 case MONO_TYPE_FNPTR:
292 case MONO_TYPE_CLASS:
293 case MONO_TYPE_STRING:
294 case MONO_TYPE_OBJECT:
295 case MONO_TYPE_SZARRAY:
296 case MONO_TYPE_ARRAY:
300 #if SIZEOF_REGISTER == 8
309 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their base integral type and re-enter the switch (loop lines missing here). */
310 if (type->data.klass->enumtype) {
311 type = mono_class_enum_basetype (type->data.klass);
314 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
317 case MONO_TYPE_TYPEDBYREF:
319 case MONO_TYPE_GENERICINST:
320 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal under generic sharing. */
324 g_assert (cfg->generic_sharing_context);
325 if (mini_type_var_is_vt (cfg, type))
330 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug dump of one basic block: its in/out edges (block numbers and dfn) and every instruction. */
336 mono_print_bb (MonoBasicBlock *bb, const char *msg)
341 printf ("\n%s %d: [IN: ", msg, bb->block_num);
342 for (i = 0; i < bb->in_count; ++i)
343 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
345 for (i = 0; i < bb->out_count; ++i)
346 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
348 for (tree = bb->code; tree; tree = tree->next)
349 mono_print_ins_index (-1, tree);
/* One-time initialization of the cached trampoline signatures declared above. */
353 mono_create_helper_signatures (void)
355 helper_sig_domain_get = mono_create_icall_signature ("ptr");
356 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
357 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
358 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
359 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
360 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
361 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
365 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
366 * foo<T> (int i) { ldarg.0; box T; }
/* Mark the method unverifiable; under gsharedvt this instead falls back to per-instantiation compilation. */
368 #define UNVERIFIED do { \
369 if (cfg->gsharedvt) { \
370 if (cfg->verbose_level > 2) \
371 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
372 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
373 goto exception_exit; \
375 if (mini_get_debug_options ()->break_on_unverified) \
/* Loader failures: break into the debugger if requested, otherwise jump to the load_error label. */
381 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* As LOAD_ERROR, but also records the failing class in cfg->exception_ptr. */
383 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Look up (or create and register) the basic block starting at IL offset 'ip'. */
385 #define GET_BBLOCK(cfg,tblock,ip) do { \
386 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
388 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
389 NEW_BBLOCK (cfg, (tblock)); \
390 (tblock)->cil_code = (ip); \
391 ADD_BBLOCK (cfg, (tblock)); \
395 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA: dest = sr1 + (sr2 << shift) + imm; dest holds a managed pointer. */
396 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
397 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
398 (dest)->dreg = alloc_ireg_mp ((cfg)); \
399 (dest)->sreg1 = (sr1); \
400 (dest)->sreg2 = (sr2); \
401 (dest)->inst_imm = (imm); \
402 (dest)->backend.shift_amount = (shift); \
403 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, widen an I4 operand to pointer width (sign-extend) before mixing it with a PTR operand. */
407 #if SIZEOF_REGISTER == 8
408 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
409 /* FIXME: Need to add many more cases */ \
410 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
412 int dr = alloc_preg (cfg); \
413 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
414 (ins)->sreg2 = widen->dreg; \
/* No-op on 32-bit targets. */
418 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values, emit the typed binary op, push the (possibly decomposed) result. */
421 #define ADD_BINOP(op) do { \
422 MONO_INST_NEW (cfg, ins, (op)); \
424 ins->sreg1 = sp [0]->dreg; \
425 ins->sreg2 = sp [1]->dreg; \
426 type_from_op (ins, sp [0], sp [1]); \
428 /* Have to insert a widening op */ \
429 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
430 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
431 MONO_ADD_INS ((cfg)->cbb, (ins)); \
432 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one value, emit the typed unary op, push the result. */
435 #define ADD_UNOP(op) do { \
436 MONO_INST_NEW (cfg, ins, (op)); \
438 ins->sreg1 = sp [0]->dreg; \
439 type_from_op (ins, sp [0], NULL); \
441 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
442 MONO_ADD_INS ((cfg)->cbb, (ins)); \
443 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch, wiring up the true/false target blocks and CFG edges. */
446 #define ADD_BINCOND(next_block) do { \
449 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
450 cmp->sreg1 = sp [0]->dreg; \
451 cmp->sreg2 = sp [1]->dreg; \
452 type_from_op (cmp, sp [0], sp [1]); \
454 type_from_op (ins, sp [0], sp [1]); \
455 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
456 GET_BBLOCK (cfg, tblock, target); \
457 link_bblock (cfg, bblock, tblock); \
458 ins->inst_true_bb = tblock; \
459 if ((next_block)) { \
460 link_bblock (cfg, bblock, (next_block)); \
461 ins->inst_false_bb = (next_block); \
462 start_new_bblock = 1; \
464 GET_BBLOCK (cfg, tblock, ip); \
465 link_bblock (cfg, bblock, tblock); \
466 ins->inst_false_bb = tblock; \
467 start_new_bblock = 2; \
/* Spill any values still on the eval stack before ending the block. */
469 if (sp != stack_start) { \
470 handle_stack_args (cfg, stack_start, sp - stack_start); \
471 CHECK_UNVERIFIABLE (cfg); \
473 MONO_ADD_INS (bblock, cmp); \
474 MONO_ADD_INS (bblock, ins); \
478 * link_bblock: Links two basic blocks
480 * links two basic blocks in the control flow graph, the 'from'
481 * argument is the starting block and the 'to' argument is the block
482 * the control flow ends to after 'from'.
/* Adds 'to' to from->out_bb and 'from' to to->in_bb, skipping duplicates; arrays are
 * reallocated from the compile mempool, one element larger each time. */
485 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
487 MonoBasicBlock **newa;
491 if (from->cil_code) {
493 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
495 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
498 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
500 printf ("edge from entry to exit\n");
/* Already an out-edge? Then nothing to do for this direction. */
505 for (i = 0; i < from->out_count; ++i) {
506 if (to == from->out_bb [i]) {
512 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
513 for (i = 0; i < from->out_count; ++i) {
514 newa [i] = from->out_bb [i];
/* Mirror the edge on the 'to' side. */
522 for (i = 0; i < to->in_count; ++i) {
523 if (from == to->in_bb [i]) {
529 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
530 for (i = 0; i < to->in_count; ++i) {
531 newa [i] = to->in_bb [i];
/* Public wrapper around link_bblock. */
540 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 link_bblock (cfg, from, to);
546 * mono_find_block_region:
548 * We mark each basic block with a region ID. We use that to avoid BB
549 * optimizations when blocks are in different regions.
552 * A region token that encodes where this region is, and information
553 * about the clause owner for this block.
555 * The region encodes the try/catch/filter clause that owns this block
556 * as well as the type. -1 is a special value that represents a block
557 * that is in none of try/catch/filter.
/* Token layout visible below: ((clause_index + 1) << 8) | region_kind | clause_flags. */
560 mono_find_block_region (MonoCompile *cfg, int offset)
562 MonoMethodHeader *header = cfg->header;
563 MonoExceptionClause *clause;
566 for (i = 0; i < header->num_clauses; ++i) {
567 clause = &header->clauses [i];
/* Filter expressions live between filter_offset and handler_offset. */
568 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
569 (offset < (clause->handler_offset)))
570 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
572 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
573 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
574 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
575 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
576 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
578 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
581 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
582 return ((i + 1) << 8) | clause->flags;
/* Collect (as a GList) the clauses of the given type whose try range contains 'ip'
 * but not 'target' — i.e. the handlers a branch from ip to target would leave. */
589 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
591 MonoMethodHeader *header = cfg->header;
592 MonoExceptionClause *clause;
596 for (i = 0; i < header->num_clauses; ++i) {
597 clause = &header->clauses [i];
598 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
599 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
600 if (clause->flags == type)
601 res = g_list_append (res, clause);
/* Get or lazily create the stack-pointer variable for an exception region,
 * cached in cfg->spvars keyed by region id. Marked volatile so the register
 * allocator leaves it on the stack. */
608 mono_create_spvar_for_region (MonoCompile *cfg, int region)
612 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
616 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
617 /* prevent it from being register allocated */
618 var->flags |= MONO_INST_VOLATILE;
620 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Lookup-only accessor for the exception-object variable at an IL offset. */
624 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
626 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for an IL offset (cfg->exvars cache). */
630 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
634 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
638 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
639 /* prevent it from being register allocated */
640 var->flags |= MONO_INST_VOLATILE;
642 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from the CLI type; enums and
 * generic instances are unwrapped and re-dispatched (loop lines missing here). */
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
656 type = mini_replace_type (type);
657 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack (condition line missing). */
659 inst->type = STACK_MP;
664 switch (type->type) {
666 inst->type = STACK_INV;
670 case MONO_TYPE_BOOLEAN:
676 inst->type = STACK_I4;
681 case MONO_TYPE_FNPTR:
682 inst->type = STACK_PTR;
684 case MONO_TYPE_CLASS:
685 case MONO_TYPE_STRING:
686 case MONO_TYPE_OBJECT:
687 case MONO_TYPE_SZARRAY:
688 case MONO_TYPE_ARRAY:
689 inst->type = STACK_OBJ;
693 inst->type = STACK_I8;
697 inst->type = STACK_R8;
699 case MONO_TYPE_VALUETYPE:
700 if (type->data.klass->enumtype) {
701 type = mono_class_enum_basetype (type->data.klass);
705 inst->type = STACK_VTYPE;
708 case MONO_TYPE_TYPEDBYREF:
709 inst->klass = mono_defaults.typed_reference_class;
710 inst->type = STACK_VTYPE;
712 case MONO_TYPE_GENERICINST:
713 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: valuetype-constrained ones are VTYPE under gsharedvt, otherwise treated as objects. */
717 g_assert (cfg->generic_sharing_context);
718 if (mini_is_gsharedvt_type (cfg, type)) {
719 g_assert (cfg->gsharedvt);
720 inst->type = STACK_VTYPE;
722 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack-type of numeric binops, indexed [lhs][rhs]; STACK_INV marks illegal pairs.
 * Row/column order follows the STACK_* enum (Inv, I4, I8, PTR, R8, MP, OBJ, VTYPE). */
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type (declaration line missing here). */
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor etc.). */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability matrix: 0 = invalid, non-zero = allowed (values 2/3/4 encode
 * special verifier cases -- exact meaning not visible in this extraction). */
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
/* Result type of shift ops: the shift amount (column) must be I4/PTR. */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
/* Each entry is a delta added to the generic CEE_/OP_ opcode to get the typed opcode. */
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
/* Dispatches on the generic opcode, validates operand stack types via the
 * tables above, and specializes ins->opcode with the *_op_map deltas.
 * Many case labels are missing from this extraction; the groups below are
 * inferred from the surviving bodies. */
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
/* Numeric binops (add/sub/mul/div/rem...). */
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor). */
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
/* OP_COMPARE: pick the long/float/int compare based on operand width. */
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes bin_comp_table with src1 twice -- looks intentional for
 * immediate compares (both operands same type) but worth confirming. */
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq..blt.un). */
897 ins->opcode += beqops_op_map [src1->type];
/* ceq. */
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt and unsigned variants: only comparability value 1 is acceptable here. */
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
/* neg. */
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
/* not: only integral operand types are valid. */
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
/* conv to small ints: result is I4. */
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit-or-smaller targets. */
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u / conv.i: result is native int; opcode depends on source width. */
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_ICONV_TO_U;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit ints. */
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4/conv.r8. */
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
/* Saturating/ovf ops producing I4. */
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
/* Memory loads: result type is fixed by the load opcode. */
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed-pointer results get a conservative klass. */
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
/* Eval-stack type for each CLI element type (table declaration line missing here). */
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Compatibility matrix between stack types and signature types -- contents not visible. */
1066 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the argument instructions match the call signature's parameter
 * types (byref-ness, reference kinds, float widths). Return statements are
 * missing from this extraction, so success/failure values are not visible. */
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates and caches cfg->domainvar. */
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily creates cfg->got_var; only meaningful when compiling AOT on
 * architectures that define MONO_ARCH_NEED_GOT_VAR. */
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
/* Lazily creates cfg->rgctx_var (the runtime-generic-context/vtable variable);
 * requires generic sharing to be active. */
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1169 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: map an instruction's eval-stack type
 * back to a representative MonoType (using ins->klass for MP/OBJ/VTYPE). */
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType directly to its STACK_* classification. The return
 * statements for each case group are missing from this extraction. */
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 t = mono_type_get_underlying_type (t);
1196 case MONO_TYPE_BOOLEAN:
1199 case MONO_TYPE_CHAR:
1206 case MONO_TYPE_FNPTR:
1208 case MONO_TYPE_CLASS:
1209 case MONO_TYPE_STRING:
1210 case MONO_TYPE_OBJECT:
1211 case MONO_TYPE_SZARRAY:
1212 case MONO_TYPE_ARRAY:
1220 case MONO_TYPE_VALUETYPE:
1221 case MONO_TYPE_TYPEDBYREF:
1223 case MONO_TYPE_GENERICINST:
1224 if (mono_type_generic_inst_is_valuetype (t))
1230 g_assert_not_reached ();
/* Map a ldelem/stelem CIL opcode to the element MonoClass it accesses.
 * Most case labels are missing from this extraction; only the CEE_*ELEM_REF
 * labels survive, with the per-type returns. */
1237 array_access_to_klass (int opcode)
1241 return mono_defaults.byte_class;
1243 return mono_defaults.uint16_class;
1246 return mono_defaults.int_class;
1249 return mono_defaults.sbyte_class;
1252 return mono_defaults.int16_class;
1255 return mono_defaults.int32_class;
1257 return mono_defaults.uint32_class;
1260 return mono_defaults.int64_class;
1263 return mono_defaults.single_class;
1266 return mono_defaults.double_class;
1267 case CEE_LDELEM_REF:
1268 case CEE_STELEM_REF:
1269 return mono_defaults.object_class;
1271 g_assert_not_reached ();
1277 * We try to share variables when possible
/* Return a local variable to hold eval-stack slot 'slot' of type ins->type,
 * reusing a cached one (cfg->intvars, keyed by slot x stack type) when possible. */
1280 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1285 /* inlining can result in deeper stacks */
1286 if (slot >= cfg->header->max_stack)
1287 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1289 pos = ins->type - 1 + slot * STACK_MAX;
1291 switch (ins->type) {
1298 if ((vnum = cfg->intvars [pos]))
1299 return cfg->varinfo [vnum];
1300 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1301 cfg->intvars [pos] = res->inst_c0;
/* Fallback path: always allocate a fresh variable (caching branch not taken). */
1304 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* When compiling AOT, remember which image+token 'key' came from so the AOT
 * compiler can later re-resolve it. Skipped when a generic context is active. */
1310 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1313 * Don't use this if a generic_context is set, since that means AOT can't
1314 * look up the method using just the image+token.
1315 * table == 0 means this is a reference made from a wrapper.
1317 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1318 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1319 jump_info_token->image = image;
1320 jump_info_token->token = token;
1321 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1326 * This function is called to handle items that are left on the evaluation stack
1327 * at basic block boundaries. What happens is that we save the values to local variables
1328 * and we reload them later when first entering the target basic block (with the
1329 * handle_loaded_temps () function).
1330 * A single join point will use the same variables (stored in the array bb->out_stack or
1331 * bb->in_stack, if the basic block is before or after the join point).
1333 * This function needs to be called _before_ emitting the last instruction of
1334 * the bb (i.e. before emitting a branch).
1335 * If the stack merge fails at a join point, cfg->unverifiable is set.
1338 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1341 MonoBasicBlock *bb = cfg->cbb;
1342 MonoBasicBlock *outb;
1343 MonoInst *inst, **locals;
1348 if (cfg->verbose_level > 3)
1349 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1350 if (!bb->out_scount) {
1351 bb->out_scount = count;
1352 //printf ("bblock %d has out:", bb->block_num);
1354 for (i = 0; i < bb->out_count; ++i) {
1355 outb = bb->out_bb [i];
1356 /* exception handlers are linked, but they should not be considered for stack args */
1357 if (outb->flags & BB_EXCEPTION_HANDLER)
1359 //printf (" %d", outb->block_num);
1360 if (outb->in_stack) {
1362 bb->out_stack = outb->in_stack;
1368 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1369 for (i = 0; i < count; ++i) {
1371 * try to reuse temps already allocated for this purpouse, if they occupy the same
1372 * stack slot and if they are of the same type.
1373 * This won't cause conflicts since if 'local' is used to
1374 * store one of the values in the in_stack of a bblock, then
1375 * the same variable will be used for the same outgoing stack
1377 * This doesn't work when inlining methods, since the bblocks
1378 * in the inlined methods do not inherit their in_stack from
1379 * the bblock they are inlined to. See bug #58863 for an
1382 if (cfg->inlined_method)
1383 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1385 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1390 for (i = 0; i < bb->out_count; ++i) {
1391 outb = bb->out_bb [i];
1392 /* exception handlers are linked, but they should not be considered for stack args */
1393 if (outb->flags & BB_EXCEPTION_HANDLER)
1395 if (outb->in_scount) {
1396 if (outb->in_scount != bb->out_scount) {
1397 cfg->unverifiable = TRUE;
1400 continue; /* check they are the same locals */
1402 outb->in_scount = count;
1403 outb->in_stack = bb->out_stack;
1406 locals = bb->out_stack;
1408 for (i = 0; i < count; ++i) {
1409 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1410 inst->cil_code = sp [i]->cil_code;
1411 sp [i] = locals [i];
1412 if (cfg->verbose_level > 3)
1413 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1417 * It is possible that the out bblocks already have in_stack assigned, and
1418 * the in_stacks differ. In this case, we will store to all the different
1425 /* Find a bblock which has a different in_stack */
1427 while (bindex < bb->out_count) {
1428 outb = bb->out_bb [bindex];
1429 /* exception handlers are linked, but they should not be considered for stack args */
1430 if (outb->flags & BB_EXCEPTION_HANDLER) {
1434 if (outb->in_stack != locals) {
1435 for (i = 0; i < count; ++i) {
1436 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1437 inst->cil_code = sp [i]->cil_code;
1438 sp [i] = locals [i];
1439 if (cfg->verbose_level > 3)
1440 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1442 locals = outb->in_stack;
1451 /* Emit code which loads interface_offsets [klass->interface_id]
1452 * The array is stored in memory before vtable.
/*
 * Loads the interface offset for KLASS into INTF_REG from the negatively
 * indexed interface_offsets array preceding the vtable pointed to by
 * VTABLE_REG.  Under AOT the (adjusted) interface id is a patch constant,
 * so the offset is computed with a register add instead of an immediate.
 * NOTE(review): some original lines are elided here; code kept byte-identical.
 */
1455 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1457 if (cfg->compile_aot) {
1458 int ioffset_reg = alloc_preg (cfg);
1459 int iid_reg = alloc_preg (cfg);
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1462 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* Non-AOT: the slot address is a compile-time constant offset before the vtable. */
1466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that leaves a nonzero value in INTF_BIT_REG iff the interface
 * bitmap found at BASE_REG+OFFSET has the bit for KLASS's interface id set.
 * With COMPRESSED_INTERFACE_BITMAP the test is done via the
 * mono_class_interface_match icall; otherwise the bit is tested inline
 * (byte load + mask), with the iid coming from an AOT patch when needed.
 * NOTE(review): some original lines are elided here; code kept byte-identical.
 */
1471 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1473 int ibitmap_reg = alloc_preg (cfg);
1474 #ifdef COMPRESSED_INTERFACE_BITMAP
1476 MonoInst *res, *ins;
1477 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1478 MONO_ADD_INS (cfg->cbb, ins);
1480 if (cfg->compile_aot)
1481 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1483 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1484 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1487 int ibitmap_byte_reg = alloc_preg (cfg);
1489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1491 if (cfg->compile_aot) {
/* AOT: iid is only known at load time, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) in registers. */
1492 int iid_reg = alloc_preg (cfg);
1493 int shifted_iid_reg = alloc_preg (cfg);
1494 int ibitmap_byte_address_reg = alloc_preg (cfg);
1495 int masked_iid_reg = alloc_preg (cfg);
1496 int iid_one_bit_reg = alloc_preg (cfg);
1497 int iid_bit_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1503 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* Non-AOT: byte index and bit mask are compile-time constants. */
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1514 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1515 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap stored at MonoClass.interface_bitmap. */
1518 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1520 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1524 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1525 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap stored at MonoVTable.interface_bitmap. */
1528 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1530 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1534 * Emit code which checks whenever the interface id of @klass is smaller than
1535 * than the value given by max_iid_reg.
/*
 * On failure either branches to FALSE_TARGET or (presumably when no target
 * is given — the branch between the two paths is elided in this listing)
 * throws InvalidCastException.
 */
1538 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1539 MonoBasicBlock *false_target)
1541 if (cfg->compile_aot) {
1542 int iid_reg = alloc_preg (cfg);
1543 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1544 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1551 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1554 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable.max_interface_id and delegates to mini_emit_max_iid_check. */
1556 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1557 MonoBasicBlock *false_target)
1559 int max_iid_reg = alloc_preg (cfg);
1561 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1562 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1565 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass.max_interface_id and delegates to mini_emit_max_iid_check. */
1567 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1568 MonoBasicBlock *false_target)
1570 int max_iid_reg = alloc_preg (cfg);
1572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1573 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subtype test of the class in KLASS_REG against KLASS
 * using the supertypes table: if klass->idepth exceeds the default
 * supertable size, first check the candidate's idepth, then compare
 * supertypes[klass->idepth - 1] against KLASS (taken from KLASS_INS's dreg,
 * an AOT class constant, or an immediate).  Branches to TRUE_TARGET on
 * match; the idepth check branches to FALSE_TARGET.
 * NOTE(review): some original lines are elided here; code kept byte-identical.
 */
1577 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1579 int idepth_reg = alloc_preg (cfg);
1580 int stypes_reg = alloc_preg (cfg);
1581 int stype = alloc_preg (cfg);
1583 mono_class_setup_supertypes (klass);
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1593 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1594 } else if (cfg->compile_aot) {
1595 int const_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with no explicit klass instruction. */
1605 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1607 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against the vtable in VTABLE_REG: validate
 * max_interface_id, then test the interface bitmap bit for KLASS.  On a set
 * bit branches to TRUE_TARGET; otherwise (per the elided else path,
 * presumably when no target is given) throws InvalidCastException.
 */
1611 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 int intf_reg = alloc_preg (cfg);
1615 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1616 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1621 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1625 * Variant of the above that takes a register to the class, not the vtable.
/* Same shape as mini_emit_iface_cast, but the bitmap/max_iid come from MonoClass. */
1628 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1630 int intf_bit_reg = alloc_preg (cfg);
1632 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1633 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1638 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class-equality check of KLASS_REG against KLASS — taken
 * from KLASS_INST's dreg (the first branch; its guard condition is elided
 * in this listing), an AOT class constant, or an immediate — throwing
 * InvalidCastException on mismatch.
 */
1642 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1646 } else if (cfg->compile_aot) {
1647 int const_reg = alloc_preg (cfg);
1648 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1653 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no explicit klass instruction. */
1657 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1659 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG with KLASS (AOT constant or immediate) and emit a
 * conditional branch BRANCH_OP to TARGET instead of throwing.
 */
1663 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1665 if (cfg->compile_aot) {
1666 int const_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1668 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration — mini_emit_castclass_inst below recurses via this. */
1676 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure.  The first (array) arm — whose guard is
 * elided in this listing — checks rank and then the element class, with
 * special-casing for enum-related element types; the SZARRAY case also
 * verifies the object has no bounds (is a vector) unless obj_reg == -1.
 * The non-array path walks the supertypes table like the isinst helper.
 * NOTE(review): some original lines are elided here; code kept byte-identical.
 */
1679 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1682 int rank_reg = alloc_preg (cfg);
1683 int eclass_reg = alloc_preg (cfg);
1685 g_assert (!klass_inst);
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1688 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1689 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1691 if (klass->cast_class == mono_defaults.object_class) {
1692 int parent_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1694 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1695 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1696 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1697 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1698 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1699 } else if (klass->cast_class == mono_defaults.enum_class) {
1700 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1701 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1702 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1704 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1705 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1708 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1709 /* Check that the object is a vector too */
1710 int bounds_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1713 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table walk, as in mini_emit_isninst_cast_inst. */
1716 int idepth_reg = alloc_preg (cfg);
1717 int stypes_reg = alloc_preg (cfg);
1718 int stype = alloc_preg (cfg);
1720 mono_class_setup_supertypes (klass);
1722 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1725 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1727 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1729 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no explicit klass instruction. */
1734 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1736 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR that sets SIZE bytes at DESTREG+OFFSET to VAL (only
 * val == 0 is supported except in the small-size fast path below, per the
 * assert).  Small aligned sizes use a single store-immediate; otherwise a
 * register holding VAL is stored in the widest aligned chunks available
 * (8/4/2/1 bytes, unaligned chunks only when !NO_UNALIGNED_ACCESS).
 * NOTE(review): the loop/offset-advance lines are elided in this listing;
 * code kept byte-identical.
 */
1740 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1744 g_assert (val == 0);
1749 if ((size <= 4) && (size <= align)) {
1752 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1755 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1760 #if SIZEOF_REGISTER == 8
1762 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1768 val_reg = alloc_preg (cfg);
1770 if (SIZEOF_REGISTER == 8)
1771 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1773 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1776 /* This could be optimized further if neccesary */
1778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1785 #if !NO_UNALIGNED_ACCESS
1786 if (SIZEOF_REGISTER == 8) {
1788 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR that copies SIZE bytes from SRCREG+SOFFSET to
 * DESTREG+DOFFSET, load/store pair by pair, in the widest chunks allowed by
 * ALIGN (8/4/2/1 bytes; unaligned wide chunks only when
 * !NO_UNALIGNED_ACCESS).  The assert caps SIZE to bound code expansion.
 * NOTE(review): the loop/offset-advance lines are elided in this listing;
 * code kept byte-identical.
 */
1818 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1825 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1826 g_assert (size < 10000);
1829 /* This could be optimized further if neccesary */
1831 cur_reg = alloc_preg (cfg);
1832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1840 #if !NO_UNALIGNED_ACCESS
1841 if (SIZEOF_REGISTER == 8) {
1843 cur_reg = alloc_preg (cfg);
1844 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1845 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1870 cur_reg = alloc_preg (cfg);
1871 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR that stores SREG1 into the TLS slot identified by TLS_KEY.  Under
 * AOT the offset is materialized as a patchable constant and OP_TLS_SET_REG
 * is used; otherwise the resolved offset is baked into an OP_TLS_SET.
 * NOTE(review): some original lines (sreg1 assignment, braces) are elided
 * in this listing; code kept byte-identical.
 */
1880 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1884 if (cfg->compile_aot) {
1885 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1886 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1888 ins->sreg2 = c->dreg;
1889 MONO_ADD_INS (cfg->cbb, ins);
1891 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1893 ins->inst_offset = mini_get_tls_offset (tls_key);
1894 MONO_ADD_INS (cfg->cbb, ins);
1901 * Emit IR to push the current LMF onto the LMF stack.
/*
 * Two strategies: if the LMF lives directly in TLS (lmf_ir_mono_lmf +
 * TLS_KEY_LMF supported), chain the frame's lmf_var through previous_lmf
 * and store its address back into TLS; otherwise obtain lmf_addr (intrinsic
 * or icall), cache it in cfg->lmf_addr_var, and link the frame in through
 * *lmf_addr.
 * NOTE(review): some original lines are elided here; code kept byte-identical.
 */
1904 emit_push_lmf (MonoCompile *cfg)
1907 * Emit IR to push the LMF:
1908 * lmf_addr = <lmf_addr from tls>
1909 * lmf->lmf_addr = lmf_addr
1910 * lmf->prev_lmf = *lmf_addr
1913 int lmf_reg, prev_lmf_reg;
1914 MonoInst *ins, *lmf_ins;
1919 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1920 /* Load current lmf */
1921 lmf_ins = mono_get_lmf_intrinsic (cfg);
1923 MONO_ADD_INS (cfg->cbb, lmf_ins);
1924 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1925 lmf_reg = ins->dreg;
1926 /* Save previous_lmf */
1927 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make this frame's LMF the current one. */
1929 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1932 * Store lmf_addr in a variable, so it can be allocated to a global register.
1934 if (!cfg->lmf_addr_var)
1935 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1937 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1939 MONO_ADD_INS (cfg->cbb, lmf_ins);
/* Fallback when no intrinsic is available (branch guard elided in listing). */
1941 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1942 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1944 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1945 lmf_reg = ins->dreg;
1947 prev_lmf_reg = alloc_preg (cfg);
1948 /* Save previous_lmf */
1949 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1950 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new LMF: *lmf_addr = lmf. */
1952 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1959 * Emit IR to pop the current LMF from the LMF stack.
/*
 * Mirror of emit_push_lmf: restore the previous LMF either directly into
 * TLS (lmf_ir_mono_lmf path) or through the cached lmf_addr variable.
 * NOTE(review): some original lines are elided here; code kept byte-identical.
 */
1962 emit_pop_lmf (MonoCompile *cfg)
1964 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1970 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1971 lmf_reg = ins->dreg;
1973 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1974 /* Load previous_lmf */
1975 prev_lmf_reg = alloc_preg (cfg);
1976 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Make the previous LMF current again. */
1978 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
1981 * Emit IR to pop the LMF:
1982 * *(lmf->lmf_addr) = lmf->prev_lmf
1984 /* This could be called before emit_push_lmf () */
1985 if (!cfg->lmf_addr_var)
1986 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1987 lmf_addr_reg = cfg->lmf_addr_var->dreg;
1989 prev_lmf_reg = alloc_preg (cfg);
1990 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1991 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 * Map a return TYPE to the call opcode family to use: plain CALL for
 * int/pointer/object, LCALL for i8, FCALL for floats, VCALL for
 * valuetypes/typedbyref, VOIDCALL for void — each selected as the _REG
 * (calli), _MEMBASE (virtual) or plain variant.  Enums and generic
 * instances are unwrapped and re-dispatched (the loop/goto that re-enters
 * the switch is elided in this listing).
 */
1996 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1999 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2002 type = mini_get_basic_type_from_generic (gsctx, type);
2003 type = mini_replace_type (type);
2004 switch (type->type) {
2005 case MONO_TYPE_VOID:
2006 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2009 case MONO_TYPE_BOOLEAN:
2012 case MONO_TYPE_CHAR:
2015 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2019 case MONO_TYPE_FNPTR:
2020 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2021 case MONO_TYPE_CLASS:
2022 case MONO_TYPE_STRING:
2023 case MONO_TYPE_OBJECT:
2024 case MONO_TYPE_SZARRAY:
2025 case MONO_TYPE_ARRAY:
2026 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2029 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2032 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2033 case MONO_TYPE_VALUETYPE:
2034 if (type->data.klass->enumtype) {
/* Enums dispatch on their underlying integral type. */
2035 type = mono_class_enum_basetype (type->data.klass);
2038 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2039 case MONO_TYPE_TYPEDBYREF:
2040 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2041 case MONO_TYPE_GENERICINST:
2042 type = &type->data.generic_class->container_class->byval_arg;
2045 case MONO_TYPE_MVAR:
2047 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2049 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2055 * target_type_is_incompatible:
2056 * @cfg: MonoCompile context
2058 * Check that the item @arg on the evaluation stack can be stored
2059 * in the target type (can be a local, or field, etc).
2060 * The cfg arg can be used to check if we need verification or just
2063 * Returns: non-0 value if arg can't be stored on a target.
/*
 * NOTE(review): this listing elides several original lines (return
 * statements of individual arms, some case labels); code kept byte-identical.
 */
2066 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2068 MonoType *simple_type;
2071 target = mini_replace_type (target);
2072 if (target->byref) {
2073 /* FIXME: check that the pointed to types match */
2074 if (arg->type == STACK_MP)
2075 return arg->klass != mono_class_from_mono_type (target);
2076 if (arg->type == STACK_PTR)
2081 simple_type = mono_type_get_underlying_type (target);
2082 switch (simple_type->type) {
2083 case MONO_TYPE_VOID:
2087 case MONO_TYPE_BOOLEAN:
2090 case MONO_TYPE_CHAR:
2093 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2097 /* STACK_MP is needed when setting pinned locals */
2098 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2103 case MONO_TYPE_FNPTR:
2105 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2106 * in native int. (#688008).
2108 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2111 case MONO_TYPE_CLASS:
2112 case MONO_TYPE_STRING:
2113 case MONO_TYPE_OBJECT:
2114 case MONO_TYPE_SZARRAY:
2115 case MONO_TYPE_ARRAY:
2116 if (arg->type != STACK_OBJ)
2118 /* FIXME: check type compatibility */
2122 if (arg->type != STACK_I8)
2127 if (arg->type != STACK_R8)
2130 case MONO_TYPE_VALUETYPE:
2131 if (arg->type != STACK_VTYPE)
/* For vtypes the exact class must match, not just the stack kind. */
2133 klass = mono_class_from_mono_type (simple_type);
2134 if (klass != arg->klass)
2137 case MONO_TYPE_TYPEDBYREF:
2138 if (arg->type != STACK_VTYPE)
2140 klass = mono_class_from_mono_type (simple_type);
2141 if (klass != arg->klass)
2144 case MONO_TYPE_GENERICINST:
2145 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2146 if (arg->type != STACK_VTYPE)
2148 klass = mono_class_from_mono_type (simple_type);
2149 if (klass != arg->klass)
2153 if (arg->type != STACK_OBJ)
2155 /* FIXME: check type compatibility */
2159 case MONO_TYPE_MVAR:
/* Type variables only appear here under generic sharing. */
2160 g_assert (cfg->generic_sharing_context);
2161 if (mini_type_var_is_vt (cfg, simple_type)) {
2162 if (arg->type != STACK_VTYPE)
2165 if (arg->type != STACK_OBJ)
2170 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2176 * Prepare arguments for passing to a function call.
2177 * Return a non-zero value if the arguments can't be passed to the given
2179 * The type checks are not yet complete and some conversions may need
2180 * casts on 32 or 64 bit architectures.
2182 * FIXME: implement this using target_type_is_incompatible ()
/*
 * NOTE(review): this listing elides several original lines (the hasthis
 * guard, individual return statements); code kept byte-identical.
 */
2185 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2187 MonoType *simple_type;
/* 'this' argument (guard elided): must be an object, managed ptr, or ptr. */
2191 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2195 for (i = 0; i < sig->param_count; ++i) {
2196 if (sig->params [i]->byref) {
2197 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2201 simple_type = sig->params [i];
2202 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2204 switch (simple_type->type) {
2205 case MONO_TYPE_VOID:
2210 case MONO_TYPE_BOOLEAN:
2213 case MONO_TYPE_CHAR:
2216 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2222 case MONO_TYPE_FNPTR:
2223 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2226 case MONO_TYPE_CLASS:
2227 case MONO_TYPE_STRING:
2228 case MONO_TYPE_OBJECT:
2229 case MONO_TYPE_SZARRAY:
2230 case MONO_TYPE_ARRAY:
2231 if (args [i]->type != STACK_OBJ)
2236 if (args [i]->type != STACK_I8)
2241 if (args [i]->type != STACK_R8)
2244 case MONO_TYPE_VALUETYPE:
2245 if (simple_type->data.klass->enumtype) {
/* Re-check the enum's underlying type (re-dispatch elided in listing). */
2246 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2249 if (args [i]->type != STACK_VTYPE)
2252 case MONO_TYPE_TYPEDBYREF:
2253 if (args [i]->type != STACK_VTYPE)
2256 case MONO_TYPE_GENERICINST:
2257 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2260 case MONO_TYPE_MVAR:
2262 if (args [i]->type != STACK_VTYPE)
2266 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_CALL_MEMBASE (virtual call) opcode to its direct-call
 * counterpart.  The per-case return values are elided in this listing;
 * unknown opcodes hit the assert.
 */
2274 callvirt_to_call (int opcode)
2277 case OP_CALL_MEMBASE:
2279 case OP_VOIDCALL_MEMBASE:
2281 case OP_FCALL_MEMBASE:
2283 case OP_VCALL_MEMBASE:
2285 case OP_LCALL_MEMBASE:
2288 g_assert_not_reached ();
2294 #ifdef MONO_ARCH_HAVE_IMT
2295 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Materialize the IMT argument (either the given IMT_ARG value or a
 * constant for METHOD — AOT patch or inline pointer) into a register and
 * attach it to CALL.  LLVM uses imt_arg_reg; otherwise the value goes into
 * MONO_ARCH_IMT_REG when defined, falling back to
 * mono_arch_emit_imt_argument.
 * NOTE(review): some original lines (guards, braces, #else/#endif) are
 * elided in this listing; code kept byte-identical.
 */
2297 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2301 if (COMPILE_LLVM (cfg)) {
2302 method_reg = alloc_preg (cfg);
2305 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2306 } else if (cfg->compile_aot) {
2307 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2310 MONO_INST_NEW (cfg, ins, OP_PCONST);
2311 ins->inst_p0 = method;
2312 ins->dreg = method_reg;
2313 MONO_ADD_INS (cfg->cbb, ins);
2317 call->imt_arg_reg = method_reg;
2319 #ifdef MONO_ARCH_IMT_REG
2320 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2322 /* Need this to keep the IMT arg alive */
2323 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2328 #ifdef MONO_ARCH_IMT_REG
2329 method_reg = alloc_preg (cfg);
2332 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2333 } else if (cfg->compile_aot) {
2334 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2337 MONO_INST_NEW (cfg, ins, OP_PCONST);
2338 ins->inst_p0 = method;
2339 ins->dreg = method_reg;
2340 MONO_ADD_INS (cfg->cbb, ins);
2343 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2345 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP and fill in its target (the ip/type
 * assignments are elided in this listing).
 */
2350 static MonoJumpInfo *
2351 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2353 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2357 ji->data.target = target;
/* Context-used check for a class; only meaningful under generic sharing
 * (the non-sharing return, presumably 0, is elided in this listing). */
2363 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2365 if (cfg->generic_sharing_context)
2366 return mono_class_check_context_used (klass);
/* Context-used check for a method; only meaningful under generic sharing
 * (the non-sharing return, presumably 0, is elided in this listing). */
2372 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2374 if (cfg->generic_sharing_context)
2375 return mono_method_check_context_used (method);
2381 * check_method_sharing:
2383 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * Results are returned through the optional OUT_PASS_VTABLE /
 * OUT_PASS_MRGCTX pointers.  A vtable is passed for static or valuetype
 * methods of generic classes when the method might be shared; an mrgctx is
 * passed when the method has its own method_inst and is sharable (or under
 * gsharedvt with a gsharedvt signature).
 * NOTE(review): some original lines (pass_vtable/pass_mrgctx assignments,
 * braces) are elided in this listing; code kept byte-identical.
 */
2386 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2388 gboolean pass_vtable = FALSE;
2389 gboolean pass_mrgctx = FALSE;
2391 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2392 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2393 gboolean sharable = FALSE;
2395 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2398 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2399 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2400 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2402 sharable = sharing_enabled && context_sharable;
2406 * Pass vtable iff target method might
2407 * be shared, which means that sharing
2408 * is enabled for its class and its
2409 * context is sharable (and it's not a
2412 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
2416 if (mini_method_get_context (cmethod) &&
2417 mini_method_get_context (cmethod)->method_inst) {
/* A method with its own inst never also takes a vtable arg. */
2418 g_assert (!pass_vtable);
2420 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2423 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2424 MonoGenericContext *context = mini_method_get_context (cmethod);
2425 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2427 if (sharing_enabled && context_sharable)
2429 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2434 if (out_pass_vtable)
2435 *out_pass_vtable = pass_vtable;
2436 if (out_pass_mrgctx)
2437 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG: choose the call opcode (OP_TAILCALL for
 * tail calls, otherwise from ret_type_to_call_opcode), set up the return —
 * either via cfg->vret_addr, a fresh vtype temp addressed through
 * OP_OUTARG_VTRETADDR, or a plain dreg — convert any R4 arguments under
 * soft-float, then hand the call to the LLVM or native arg-emission
 * backend and account for parameter-area/stack usage.
 * NOTE(review): many original lines (guards, braces, #else paths) are
 * elided in this listing; code kept byte-identical.
 */
2440 inline static MonoCallInst *
2441 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2442 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2446 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2451 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2453 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2456 call->signature = sig;
2457 call->rgctx_reg = rgctx;
2458 sig_ret = mini_replace_type (sig->ret);
2460 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Vtype returns: either reuse the caller-provided vret_addr... */
2463 if (mini_type_is_vtype (cfg, sig_ret)) {
2464 call->vret_var = cfg->vret_addr;
2465 //g_assert_not_reached ();
/* ...or allocate a temp and pass its address via OP_OUTARG_VTRETADDR. */
2467 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2468 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2471 temp->backend.is_pinvoke = sig->pinvoke;
2474 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2475 * address of return value to increase optimization opportunities.
2476 * Before vtype decomposition, the dreg of the call ins itself represents the
2477 * fact the call modifies the return value. After decomposition, the call will
2478 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2479 * will be transformed into an LDADDR.
2481 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2482 loada->dreg = alloc_preg (cfg);
2483 loada->inst_p0 = temp;
2484 /* We reference the call too since call->dreg could change during optimization */
2485 loada->inst_p1 = call;
2486 MONO_ADD_INS (cfg->cbb, loada);
2488 call->inst.dreg = temp->dreg;
2490 call->vret_var = loada;
2491 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2492 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2494 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2495 if (COMPILE_SOFT_FLOAT (cfg)) {
2497 * If the call has a float argument, we would need to do an r8->r4 conversion using
2498 * an icall, but that cannot be done during the call sequence since it would clobber
2499 * the call registers + the stack. So we do it before emitting the call.
2501 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2503 MonoInst *in = call->args [i];
2505 if (i >= sig->hasthis)
2506 t = sig->params [i - sig->hasthis];
2508 t = &mono_defaults.int_class->byval_arg;
2509 t = mono_type_get_underlying_type (t);
2511 if (!t->byref && t->type == MONO_TYPE_R4) {
2512 MonoInst *iargs [1];
2516 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2518 /* The result will be in an int vreg */
2519 call->args [i] = conv;
2525 call->need_unbox_trampoline = unbox_trampoline;
2528 if (COMPILE_LLVM (cfg))
2529 mono_llvm_emit_call (cfg, call);
2531 mono_arch_emit_call (cfg, call);
2533 mono_arch_emit_call (cfg, call);
2536 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2537 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 * Attach the runtime-generic-context argument (already in RGCTX_REG) to
 * CALL. On architectures with a dedicated RGCTX register the value is
 * passed in MONO_ARCH_RGCTX_REG; the reg is also recorded in
 * call->rgctx_arg_reg. NOTE(review): the #else/#endif branches of the
 * #ifdef are elided in this view.
 */
2543 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2545 #ifdef MONO_ARCH_RGCTX_REG
2546 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2547 cfg->uses_rgctx_reg = TRUE;
2548 call->rgctx_reg = TRUE;
2550 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * IMT_ARG (optional) is passed as the IMT argument; RGCTX_ARG (optional)
 * is copied into a fresh preg and attached via set_rgctx_arg.
 * Returns the call instruction as a MonoInst*.
 */
inline static MonoInst*
2558 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
/* Copy the rgctx value first: the call sequence below must not clobber it. */
2564 rgctx_reg = mono_alloc_preg (cfg);
2565 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2568 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* sreg1 holds the function-pointer for an indirect call. */
2570 call->inst.sreg1 = addr->dreg;
2573 emit_imt_argument (cfg, call, NULL, imt_arg);
2575 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2578 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2580 return (MonoInst*)call;
2584 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2587 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2589 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 * Emit a (possibly virtual / tail / remoting / delegate) call to METHOD
 * with arguments ARGS. THIS non-NULL selects a virtual call. The function
 * decides between: a remoting wrapper call, a delegate Invoke fast path,
 * a devirtualized direct call, an IMT-based interface call, or a plain
 * vtable-slot call. Returns the call instruction as a MonoInst*.
 * NOTE(review): elided view — several lines (declarations, braces,
 * #else arms) of the original body are not visible here.
 */
2592 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2593 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2595 #ifndef DISABLE_REMOTING
2596 gboolean might_be_remote = FALSE;
2598 gboolean virtual = this != NULL;
2599 gboolean enable_for_aot = TRUE;
2603 gboolean need_unbox_trampoline;
2606 sig = mono_method_signature (method);
/* Preserve the rgctx value across the call-emission sequence. */
2609 rgctx_reg = mono_alloc_preg (cfg);
2610 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2613 if (method->string_ctor) {
2614 /* Create the real signature */
2615 /* FIXME: Cache these */
2616 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2617 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2622 context_used = mini_method_check_context_used (cfg, method);
2624 #ifndef DISABLE_REMOTING
/* Transparent-proxy candidates: non-virtual calls on MarshalByRef (or object)
 * where we cannot statically prove 'this' is a local object. */
2625 might_be_remote = this && sig->hasthis &&
2626 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2627 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2629 if (might_be_remote && context_used) {
2632 g_assert (cfg->generic_sharing_context);
/* Shared code: fetch the remoting-invoke-with-check address from the rgctx
 * and call indirectly. */
2634 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2636 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2640 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2642 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2644 #ifndef DISABLE_REMOTING
2645 if (might_be_remote)
2646 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2649 call->method = method;
2650 call->inst.flags |= MONO_INST_HAS_METHOD;
2651 call->inst.inst_left = this;
2652 call->tail_call = tail;
2655 int vtable_reg, slot_reg, this_reg;
2658 this_reg = this->dreg;
/* Fast path for MulticastDelegate.Invoke: jump through delegate->invoke_impl. */
2660 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2661 MonoInst *dummy_use;
2663 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2665 /* Make a call to delegate->invoke_impl */
2666 call->inst.inst_basereg = this_reg;
2667 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2668 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2670 /* We must emit a dummy use here because the delegate trampoline will
2671 replace the 'this' argument with the delegate target making this activation
2672 no longer a root for the delegate.
2673 This is an issue for delegates that target collectible code such as dynamic
2674 methods of GC'able assemblies.
2676 For a test case look into #667921.
2678 FIXME: a dummy use is not the best way to do it as the local register allocator
2679 will put it on a caller save register and spil it around the call.
2680 Ideally, we would either put it on a callee save register or only do the store part.
2682 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2684 return (MonoInst*)call;
/* Devirtualization: non-virtual, or final and not a remoting wrapper. */
2687 if ((!cfg->compile_aot || enable_for_aot) &&
2688 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2689 (MONO_METHOD_IS_FINAL (method) &&
2690 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2691 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2693 * the method is not virtual, we just need to ensure this is not null
2694 * and then we can call the method directly.
2696 #ifndef DISABLE_REMOTING
2697 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2699 * The check above ensures method is not gshared, this is needed since
2700 * gshared methods can't have wrappers.
2702 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors take no 'this', so skip the null check for them. */
2706 if (!method->string_ctor)
2707 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2709 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2710 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2712 * the method is virtual, but we can statically dispatch since either
2713 * it's class or the method itself are sealed.
2714 * But first we need to ensure it's not a null reference.
2716 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2718 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on null 'this'). */
2720 vtable_reg = alloc_preg (cfg);
2721 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2722 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2724 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call through the IMT: slots sit at negative offsets from the vtable. */
2726 guint32 imt_slot = mono_method_get_imt_slot (method);
2727 emit_imt_argument (cfg, call, call->method, imt_arg);
2728 slot_reg = vtable_reg;
2729 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2732 if (slot_reg == -1) {
/* Non-IMT fallback: locate the interface method table via the vtable. */
2733 slot_reg = alloc_preg (cfg);
2734 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2735 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Regular virtual call: index into the vtable's method slots. */
2738 slot_reg = vtable_reg;
2739 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2740 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2741 #ifdef MONO_ARCH_HAVE_IMT
2743 g_assert (mono_method_signature (method)->generic_param_count);
2744 emit_imt_argument (cfg, call, call->method, imt_arg);
2749 call->inst.sreg1 = slot_reg;
2750 call->inst.inst_offset = offset;
2751 call->virtual = TRUE;
2755 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2758 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2760 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 * Convenience wrapper around mono_emit_method_call_full: non-tail call,
 * no IMT or rgctx argument, signature taken from METHOD itself.
 */
2764 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2766 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function FUNC with signature SIG.
 * The call is non-virtual, non-tail, and carries no rgctx.
 * NOTE(review): the line storing FUNC into the call (and the locals) is
 * elided in this view.
 */
2770 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2777 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2780 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2782 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the JIT icall registered for address FUNC: look up its
 * MonoJitICallInfo and call through the icall wrapper with the registered
 * signature.
 */
2786 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2788 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2792 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2796 * mono_emit_abs_call:
2798 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
 * The MonoJumpInfo is used as the call "address"; the ABS patch resolver
 * recognizes it via cfg->abs_patches and substitutes the real target.
2800 inline static MonoInst*
2801 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2802 MonoMethodSignature *sig, MonoInst **args)
2804 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2808 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the identity table mapping patch infos to themselves. */
2811 if (cfg->abs_patches == NULL)
2812 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2813 g_hash_table_insert (cfg->abs_patches, ji, ji);
2814 ins = mono_emit_native_call (cfg, ji, sig, args);
2815 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 * Widen the result INS of a call with signature FSIG when the callee is
 * native (pinvoke) or LLVM-compiled: small integer returns may have
 * uninitialized upper bits, so insert an explicit sign/zero extension.
 * Returns the (possibly replaced) result instruction.
 * NOTE(review): break statements and the return are elided in this view.
 */
2820 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2822 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2823 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2827 * Native code might return non register sized integers
2828 * without initializing the upper bits.
/* Map the return's load opcode to the matching widening conversion. */
2830 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2831 case OP_LOADI1_MEMBASE:
2832 widen_op = OP_ICONV_TO_I1;
2834 case OP_LOADU1_MEMBASE:
2835 widen_op = OP_ICONV_TO_U1;
2837 case OP_LOADI2_MEMBASE:
2838 widen_op = OP_ICONV_TO_I2;
2840 case OP_LOADU2_MEMBASE:
2841 widen_op = OP_ICONV_TO_U2;
2847 if (widen_op != -1) {
2848 int dreg = alloc_preg (cfg);
2851 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Keep the eval-stack type of the original result. */
2852 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return (and cache in a static) the managed String.memcpy(dst,src,n)
 * helper from corlib; aborts if corlib is too old to provide it.
 */
2862 get_memcpy_method (void)
2864 static MonoMethod *memcpy_method = NULL;
2865 if (!memcpy_method) {
2866 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2868 g_error ("Old corlib found. Install a new one");
2870 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Recursively build in *WB_BITMAP a bitmask with one bit per pointer-sized
 * slot (relative to OFFSET) for every reference field inside KLASS,
 * descending into value-type fields that contain references. Static
 * fields are skipped. Used to drive mono_gc_wbarrier_value_copy_bitmap.
 */
2874 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2876 MonoClassField *field;
2877 gpointer iter = NULL;
2879 while ((field = mono_class_get_fields (klass, &iter))) {
2882 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the (absent) MonoObject header. */
2884 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2885 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the bitmap to be valid. */
2886 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2887 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2889 MonoClass *field_class = mono_class_from_mono_type (field->type);
2890 if (field_class->has_references)
2891 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for the store of VALUE through PTR. Prefers,
 * in order: an arch-specific OP_CARD_TABLE_WBARRIER, inline card-table
 * marking (shift + optional mask + store of 1 into the card byte), and
 * finally a call to the generic managed write-barrier method. No-op when
 * cfg->gen_write_barriers is off. A dummy use keeps VALUE alive across
 * the barrier.
 */
2897 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2899 int card_table_shift_bits;
2900 gpointer card_table_mask;
2902 MonoInst *dummy_use;
2903 int nursery_shift_bits;
2904 size_t nursery_size;
2905 gboolean has_card_table_wb = FALSE;
2907 if (!cfg->gen_write_barriers)
2910 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2912 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2914 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER_IN_AOT
2915 if (cfg->compile_aot)
2916 has_card_table_wb = TRUE;
2918 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2919 has_card_table_wb = TRUE;
2922 if (has_card_table_wb && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
/* Architecture provides a fused card-table barrier opcode. */
2925 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2926 wbarrier->sreg1 = ptr->dreg;
2927 wbarrier->sreg2 = value->dreg;
2928 MONO_ADD_INS (cfg->cbb, wbarrier);
2929 } else if (card_table) {
/* Inline card marking: card_index = (ptr >> shift) [& mask]; card[card_index] = 1. */
2930 int offset_reg = alloc_preg (cfg);
2931 int card_reg = alloc_preg (cfg);
2934 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2935 if (card_table_mask)
2936 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2938 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2939 * IMM's larger than 32bits.
2941 if (cfg->compile_aot) {
2942 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2944 MONO_INST_NEW (cfg, ins, OP_PCONST);
2945 ins->inst_p0 = card_table;
2946 ins->dreg = card_reg;
2947 MONO_ADD_INS (cfg->cbb, ins);
2950 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2951 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the generic managed write barrier. */
2953 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2954 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE observable to the register allocator until after the barrier. */
2957 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Emit a write-barrier-aware copy of SIZE bytes of KLASS from
 * iargs[1] to iargs[0]. Small copies (<= 5 pointer words) are unrolled
 * as word stores with per-slot write barriers driven by the bitmap from
 * create_write_barrier_bitmap; larger copies go through the
 * mono_gc_wbarrier_value_copy_bitmap icall. Bails out (return value
 * elided in this view) when alignment is below pointer size or the
 * bitmap cannot represent the size.
 */
2961 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2963 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2964 unsigned need_wb = 0;
2969 /*types with references can't have alignment smaller than sizeof(void*) */
2970 if (align < SIZEOF_VOID_P)
2973 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2974 if (size > 32 * SIZEOF_VOID_P)
2977 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2979 /* We don't unroll more than 5 stores to avoid code bloat. */
2980 if (size > 5 * SIZEOF_VOID_P) {
2981 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2982 size += (SIZEOF_VOID_P - 1);
2983 size &= ~(SIZEOF_VOID_P - 1);
2985 EMIT_NEW_ICONST (cfg, iargs [2], size);
2986 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2987 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2991 destreg = iargs [0]->dreg;
2992 srcreg = iargs [1]->dreg;
2995 dest_ptr_reg = alloc_preg (cfg);
2996 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one word per iteration. */
2999 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3001 while (size >= SIZEOF_VOID_P) {
3002 MonoInst *load_inst;
3003 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3004 load_inst->dreg = tmp_reg;
3005 load_inst->inst_basereg = srcreg;
3006 load_inst->inst_offset = offset;
3007 MONO_ADD_INS (cfg->cbb, load_inst);
3009 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references (guard elided). */
3012 emit_write_barrier (cfg, iargs [0], load_inst);
3014 offset += SIZEOF_VOID_P;
3015 size -= SIZEOF_VOID_P;
3018 /*tmp += sizeof (void*)*/
3019 if (size >= SIZEOF_VOID_P) {
3020 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3021 MONO_ADD_INS (cfg->cbb, iargs [0]);
3025 /* Those cannot be references since size < sizeof (void*) */
3027 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3028 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3034 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3035 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3041 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3042 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
 * Emit code to copy a valuetype of type @klass whose address is stored in
 * @src->dreg to memory whose address is stored at @dest->dreg.
 * NATIVE selects native (marshalled) layout/size. Handles gsharedvt
 * (size/memcpy fetched from the rgctx), write-barriered copies for
 * reference-bearing types, an inlined memcpy for small types, and a
 * managed memcpy call otherwise.
3055 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3057 MonoInst *iargs [4];
3058 int context_used, n;
3060 MonoMethod *memcpy_method;
3061 MonoInst *size_ins = NULL;
3062 MonoInst *memcpy_ins = NULL;
3066 * This check breaks with spilled vars... need to handle it during verification anyway.
3067 * g_assert (klass && klass == src->klass && klass == dest->klass);
3070 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* Size/memcpy helper are only known at runtime for gsharedvt types. */
3072 context_used = mini_class_check_context_used (cfg, klass);
3073 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3074 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3078 n = mono_class_native_size (klass, &align);
3080 n = mono_class_value_size (klass, &align);
3082 /* if native is true there should be no references in the struct */
3083 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3084 /* Avoid barriers when storing to the stack */
3085 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3086 (dest->opcode == OP_LDADDR))) {
3092 context_used = mini_class_check_context_used (cfg, klass);
3094 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3095 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3097 } else if (context_used) {
3098 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3100 if (cfg->compile_aot) {
3101 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3103 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* The icall needs the GC descriptor; compute it eagerly at compile time. */
3104 mono_class_compute_gc_descriptor (klass);
3109 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3111 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references involved: plain memcpy paths below. */
3116 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3117 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3118 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3123 iargs [2] = size_ins;
3125 EMIT_NEW_ICONST (cfg, iargs [2], n);
3127 memcpy_method = get_memcpy_method ();
3129 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3131 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return (and cache in a static) the managed String.memset(ptr,val,n)
 * helper from corlib; aborts if corlib is too old to provide it.
 */
3136 get_memset_method (void)
3138 static MonoMethod *memset_method = NULL;
3139 if (!memset_method) {
3140 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3142 g_error ("Old corlib found. Install a new one");
3144 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code zero-initializing a valuetype of type KLASS at the address in
 * DEST. gsharedvt types call a runtime bzero helper with the size fetched
 * from the rgctx; small types are zeroed inline; otherwise the managed
 * memset helper is called with value 0.
 */
3148 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3150 MonoInst *iargs [3];
3151 int n, context_used;
3153 MonoMethod *memset_method;
3154 MonoInst *size_ins = NULL;
3155 MonoInst *bzero_ins = NULL;
3156 static MonoMethod *bzero_method;
3158 /* FIXME: Optimize this for the case when dest is an LDADDR */
3160 mono_class_init (klass);
3161 if (mini_is_gsharedvt_klass (cfg, klass)) {
3162 context_used = mini_class_check_context_used (cfg, klass);
3163 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3164 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* Cache the managed bzero helper in a function-local static. */
3166 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3167 g_assert (bzero_method);
3169 iargs [1] = size_ins;
3170 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3174 n = mono_class_value_size (klass, &align);
/* Small types: unrolled inline memset is cheaper than a call. */
3176 if (n <= sizeof (gpointer) * 5) {
3177 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3180 memset_method = get_memset_method ();
3182 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3183 EMIT_NEW_ICONST (cfg, iargs [2], n);
3184 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR loading the runtime generic context for METHOD, given its
 * CONTEXT_USED flags. Sources, in order: the MRGCTX variable for methods
 * sharing over method type arguments, the vtable variable for static or
 * valuetype methods (dereferencing the MRGCTX to its class vtable when
 * needed), or the vtable loaded from the 'this' argument otherwise.
 * NOTE(review): several return statements and braces are elided in this view.
 */
3189 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3191 MonoInst *this = NULL;
3193 g_assert (cfg->generic_sharing_context);
/* 'this' is only usable as an rgctx source for non-static reference-type methods. */
3195 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3196 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3197 !method->klass->valuetype)
3198 EMIT_NEW_ARGLOAD (cfg, this, 0);
3200 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3201 MonoInst *mrgctx_loc, *mrgctx_var;
3204 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3206 mrgctx_loc = mono_get_vtable_var (cfg);
3207 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3210 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3211 MonoInst *vtable_loc, *vtable_var;
3215 vtable_loc = mono_get_vtable_var (cfg);
3216 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3218 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an MRGCTX; load its class_vtable field. */
3219 MonoInst *mrgctx_var = vtable_var;
3222 vtable_reg = alloc_preg (cfg);
3223 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3224 vtable_var->type = STACK_PTR;
/* Fallback: read the vtable out of the 'this' object header. */
3232 vtable_reg = alloc_preg (cfg);
3233 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from MP) an rgctx-fetch descriptor: METHOD + whether the
 * lookup goes through an MRGCTX, plus an embedded MonoJumpInfo of
 * PATCH_TYPE/PATCH_DATA identifying the looked-up item, and the slot's
 * INFO_TYPE. The return statement is elided in this view.
 */
static MonoJumpInfoRgctxEntry *
3239 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3241 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3242 res->method = method;
3243 res->in_mrgctx = in_mrgctx;
3244 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3245 res->data->type = patch_type;
3246 res->data->data.target = patch_data;
3247 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the lazy rgctx-fetch trampoline for ENTRY, passing the
 * rgctx value RGCTX as the single argument. Returns the fetched slot value.
 */
static inline MonoInst*
3253 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3255 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR fetching the RGCTX_TYPE item for KLASS from the runtime
 * generic context of the current method.
 */
3259 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3260 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3262 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3263 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3265 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 * Emit IR fetching the RGCTX_TYPE item for signature SIG from the runtime
 * generic context of the current method.
 */
3269 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3270 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3272 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3273 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3275 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 * Emit IR fetching the RGCTX_TYPE item for a gsharedvt call described by
 * (SIG, CMETHOD) from the runtime generic context of the current method.
 */
3279 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3280 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3282 MonoJumpInfoGSharedVtCall *call_info;
3283 MonoJumpInfoRgctxEntry *entry;
3286 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3287 call_info->sig = sig;
3288 call_info->method = cmethod;
3290 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3291 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3293 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 * Emit IR fetching the gsharedvt info structure for CMETHOD (described by
 * INFO) from the runtime generic context of the current method.
 */
3298 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3299 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3301 MonoJumpInfoRgctxEntry *entry;
3304 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3305 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3307 return emit_rgctx_fetch (cfg, rgctx, entry);
 * emit_get_rgctx_method:
3313 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3314 * normal constants, else emit a load from the rgctx.
3317 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3318 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3320 if (!context_used) {
/* Non-shared code: the method is fully known, emit a constant. */
3323 switch (rgctx_type) {
3324 case MONO_RGCTX_INFO_METHOD:
3325 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3327 case MONO_RGCTX_INFO_METHOD_RGCTX:
3328 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3331 g_assert_not_reached ();
3334 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3335 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3337 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR fetching the RGCTX_TYPE item for FIELD from the runtime
 * generic context of the current method.
 */
3342 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3343 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3345 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3346 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3348 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 * Find or allocate the slot index in cfg->gsharedvt_info's entry table
 * for (DATA, RGCTX_TYPE). Existing entries are reused (except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET, which always gets a fresh slot); the
 * table doubles (starting at 16) when full. Returns the slot index
 * (return statement elided in this view).
 */
3352 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3354 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3355 MonoRuntimeGenericContextInfoTemplate *template;
3360 for (i = 0; i < info->num_entries; ++i) {
3361 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3363 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
3367 if (info->num_entries == info->count_entries) {
/* Grow the mempool-backed array: allocate double, copy, swap. */
3368 MonoRuntimeGenericContextInfoTemplate *new_entries;
3369 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3371 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3373 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3374 info->entries = new_entries;
3375 info->count_entries = new_count_entries;
3378 idx = info->num_entries;
3379 template = &info->entries [idx];
3380 template->info_type = rgctx_type;
3381 template->data = data;
3383 info->num_entries ++;
 * emit_get_gsharedvt_info:
3391 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
 * Returns a load of entries[idx] from the method's gsharedvt info variable,
 * where idx is the slot allocated for (DATA, RGCTX_TYPE).
3394 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3399 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3400 /* Load info->entries [idx] */
3401 dreg = alloc_preg (cfg);
3402 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 * Convenience wrapper: fetch the gsharedvt info item RGCTX_TYPE keyed by
 * KLASS's byval type.
 */
3408 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3410 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
 * On return the caller must check @klass for load errors.
 * Emit a call to the generic-class-init trampoline for KLASS. In shared
 * code the vtable argument comes from the rgctx; otherwise a vtable
 * constant is used. On architectures with a dedicated vtable register the
 * argument is passed in MONO_ARCH_VTABLE_REG.
3417 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3419 MonoInst *vtable_arg;
3423 context_used = mini_class_check_context_used (cfg, klass);
3426 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3427 klass, MONO_RGCTX_INFO_VTABLE);
3429 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3433 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a different trampoline signature (argument in a register). */
3436 if (COMPILE_LLVM (cfg))
3437 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3439 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3440 #ifdef MONO_ARCH_VTABLE_REG
3441 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3442 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 * Emit a sequence point (debugger breakpoint site) at IL offset IP when
 * sequence points are enabled and we are compiling METHOD itself (not an
 * inlined body). INTR_LOC marks an interruption location; NONEMPTY_STACK
 * flags that the IL stack is not empty at this point.
 */
3449 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3453 if (cfg->gen_seq_points && cfg->method == method) {
3454 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3456 ins->flags |= MONO_INST_NONEMPTY_STACK;
3457 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * When --debug=casts is enabled, record into MonoJitTlsData the source
 * class (read from the object's vtable) and the target KLASS, so a failed
 * cast can produce a detailed message. NULL_CHECK guards the lookup with
 * a branch around null objects. *OUT_BBLOCK (if requested) receives the
 * basic block the caller should continue in.
 */
3462 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3464 if (mini_get_debug_options ()->better_cast_details) {
3465 int to_klass_reg = alloc_preg (cfg);
3466 int vtable_reg = alloc_preg (cfg);
3467 int klass_reg = alloc_preg (cfg);
3468 MonoBasicBlock *is_null_bb = NULL;
3472 NEW_BBLOCK (cfg, is_null_bb);
3474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3475 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3478 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the message below has a stray "." after the "\n" —
 * looks like a typo in the runtime string; fix upstream, not here. */
3480 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3484 MONO_ADD_INS (cfg->cbb, tls_get);
3485 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3486 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3488 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3489 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3490 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3493 MONO_START_BB (cfg, is_null_bb);
3495 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 * Clear the saved cast-details (counterpart of save_cast_details) once
 * the cast has succeeded, so stale data can't leak into a later failure.
 */
3501 reset_cast_details (MonoCompile *cfg)
3503 /* Reset the variables holding the cast details */
3504 if (mini_get_debug_options ()->better_cast_details) {
3505 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3507 MONO_ADD_INS (cfg->cbb, tls_get);
3508 /* It is enough to reset the from field */
3509 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
 * On return the caller must check @array_class for load errors
 * Emit code verifying that OBJ's runtime type is exactly ARRAY_CLASS
 * (throws ArrayTypeMismatchException otherwise). Compares either the
 * klass (under MONO_OPT_SHARED) or the vtable pointer, with AOT- and
 * gsharing-specific variants for materializing the expected value.
3517 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3519 int vtable_reg = alloc_preg (cfg);
3522 context_used = mini_class_check_context_used (cfg, array_class);
3524 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on OBJ. */
3526 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3528 if (cfg->opt & MONO_OPT_SHARED) {
3529 int class_reg = alloc_preg (cfg);
3530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3531 if (cfg->compile_aot) {
3532 int klass_reg = alloc_preg (cfg);
3533 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3534 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3538 } else if (context_used) {
/* Shared code: fetch the expected vtable from the rgctx. */
3539 MonoInst *vtable_ins;
3541 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3542 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3544 if (cfg->compile_aot) {
3548 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3550 vt_reg = alloc_preg (cfg);
3551 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3552 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3555 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3561 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3563 reset_cast_details (cfg);
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 * The unbox is implemented by calling Nullable<T>.Unbox: in shared code
 * via an indirect call through an rgctx-fetched address, otherwise as a
 * direct call, optionally passing the vtable when the target method
 * requires it (pass_vtable from check_method_sharing).
3571 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3573 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3576 MonoInst *rgctx, *addr;
3578 /* FIXME: What if the class is shared? We might not
3579 have to get the address of the method from the
3581 addr = emit_get_rgctx_method (cfg, context_used, method,
3582 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3584 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3586 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3588 gboolean pass_vtable, pass_mrgctx;
3589 MonoInst *rgctx_arg = NULL;
3591 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3592 g_assert (!pass_mrgctx);
3595 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3598 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3601 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 * Emit the unbox sequence for the object at sp [0]: verify that the
 * object's vtable rank is 0 and that its element class matches @klass
 * (throwing InvalidCastException otherwise), then produce the address
 * of the value, which lives immediately after the MonoObject header.
 */
3606 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3610 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3611 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3612 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3613 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3615 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3616 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3617 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3619 /* FIXME: generics */
3620 g_assert (klass->rank == 0);
/* An array object can never be unboxed to a value type. */
3623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3624 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3626 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3630 MonoInst *element_class;
3632 /* This assertion is from the unboxcast insn */
3633 g_assert (klass->rank == 0);
/* Shared generic code: the expected element class comes from the RGCTX. */
3635 element_class = emit_get_rgctx_klass (cfg, context_used,
3636 klass->element_class, MONO_RGCTX_INFO_KLASS);
3638 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3639 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3641 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3642 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3643 reset_cast_details (cfg);
/* The unboxed value sits right after the object header. */
3646 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3647 MONO_ADD_INS (cfg->cbb, add);
3648 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 * Unbox @obj when @klass is a gsharedvt (runtime-sized) type.  Because
 * the boxing behaviour of the instantiation is only known at run time,
 * the code branches on the RGCTX CLASS_BOX_TYPE info: value types
 * unbox in place (address past the header), reference types are
 * spilled to a temporary whose address is used, and Nullable<T> goes
 * through an indirect call to its Unbox helper.  On return *out_cbb is
 * set to the current basic block.
 */
3655 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3657 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3658 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3662 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3668 args [1] = klass_inst;
/* Dynamic cast check, since the exact class is only known at run time. */
3671 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3673 NEW_BBLOCK (cfg, is_ref_bb);
3674 NEW_BBLOCK (cfg, is_nullable_bb);
3675 NEW_BBLOCK (cfg, end_bb);
/* Box-type info: 1 == reference type, 2 == Nullable<T>, otherwise vtype. */
3676 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3677 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3681 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3683 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3684 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: the value sits right after the MonoObject header. */
3688 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3689 MONO_ADD_INS (cfg->cbb, addr);
3691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3694 MONO_START_BB (cfg, is_ref_bb);
3696 /* Save the ref to a temporary */
3697 dreg = alloc_ireg (cfg);
3698 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3699 addr->dreg = addr_reg;
3700 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3701 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3704 MONO_START_BB (cfg, is_nullable_bb);
3707 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3708 MonoInst *unbox_call;
3709 MonoMethodSignature *unbox_sig;
3712 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* Nullable<T>.Unbox cannot be constructed at JIT time for a gsharedvt
 * instantiation, so build the (object) -> T signature by hand. */
3714 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3715 unbox_sig->ret = &klass->byval_arg;
3716 unbox_sig->param_count = 1;
3717 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3718 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3720 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3721 addr->dreg = addr_reg;
3724 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3727 MONO_START_BB (cfg, end_bb);
/* All paths left the value's address in addr_reg; load through it. */
3730 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3732 *out_cbb = cfg->cbb;
3738 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 * Emit the allocation of a new object of @klass (@for_box set when the
 * allocation is for boxing).  Chooses between: the generic-sharing
 * path (class/vtable obtained from the RGCTX), the MONO_OPT_SHARED
 * path (mono_object_new with an explicit domain), an mscorlib-token
 * helper for out-of-line AOT code, a GC managed allocator when one is
 * available, and the allocation function selected by
 * mono_class_get_allocation_ftn ().
 */
3741 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3743 MonoInst *iargs [2];
3749 MonoInst *iargs [2];
3751 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3753 if (cfg->opt & MONO_OPT_SHARED)
3754 rgctx_info = MONO_RGCTX_INFO_KLASS;
3756 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3757 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3759 if (cfg->opt & MONO_OPT_SHARED) {
3760 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3762 alloc_ftn = mono_object_new;
3765 alloc_ftn = mono_object_new_specific;
3768 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3769 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3771 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3774 if (cfg->opt & MONO_OPT_SHARED) {
3775 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3776 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3778 alloc_ftn = mono_object_new;
3779 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3780 /* This happens often in argument checking code, eg. throw new FooException... */
3781 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3782 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3783 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3785 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3786 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a TypeLoadException on the cfg. */
3790 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3791 cfg->exception_ptr = klass;
3795 #ifndef MONO_CROSS_COMPILE
3796 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3799 if (managed_alloc) {
3800 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3801 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3803 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3805 guint32 lw = vtable->klass->instance_size;
/* Round the instance size up to a whole number of pointer words. */
3806 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3807 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3808 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3811 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3815 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3819 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 * Emit IR boxing @val of type @klass.  Nullable<T> delegates to its
 * managed Box () helper (indirectly through the RGCTX under generic
 * sharing).  gsharedvt types branch at run time on the RGCTX box-type
 * info: reference types pass the value through unchanged, Nullable<T>
 * calls its Box helper, and plain vtypes allocate and copy.  All other
 * types simply allocate and store the value.  *out_cbb receives the
 * final basic block.
 */
3822 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3824 MonoInst *alloc, *ins;
3826 *out_cbb = cfg->cbb;
3828 if (mono_class_is_nullable (klass)) {
3829 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3832 /* FIXME: What if the class is shared? We might not
3833 have to get the method address from the RGCTX. */
3834 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3835 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3836 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3838 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3840 gboolean pass_vtable, pass_mrgctx;
3841 MonoInst *rgctx_arg = NULL;
3843 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
/* Nullable<T>.Box is not expected to need an MRGCTX argument. */
3844 g_assert (!pass_mrgctx);
3847 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3850 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3853 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3857 if (mini_is_gsharedvt_klass (cfg, klass)) {
3858 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3859 MonoInst *res, *is_ref, *src_var, *addr;
3862 dreg = alloc_ireg (cfg);
3864 NEW_BBLOCK (cfg, is_ref_bb);
3865 NEW_BBLOCK (cfg, is_nullable_bb);
3866 NEW_BBLOCK (cfg, end_bb);
/* Box-type info: 1 == reference type, 2 == Nullable<T>, otherwise vtype. */
3867 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3869 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3871 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3872 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Plain vtype: allocate, then copy the value after the header. */
3875 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3878 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3879 ins->opcode = OP_STOREV_MEMBASE;
3881 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3882 res->type = STACK_OBJ;
3884 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3887 MONO_START_BB (cfg, is_ref_bb);
3888 addr_reg = alloc_ireg (cfg);
3890 /* val is a vtype, so has to load the value manually */
3891 src_var = get_vreg_to_inst (cfg, val->dreg);
3893 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3894 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3895 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3896 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3899 MONO_START_BB (cfg, is_nullable_bb);
3902 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3903 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3905 MonoMethodSignature *box_sig;
3908 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3909 * construct that method at JIT time, so have to do things by hand.
3911 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3912 box_sig->ret = &mono_defaults.object_class->byval_arg;
3913 box_sig->param_count = 1;
3914 box_sig->params [0] = &klass->byval_arg;
3915 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3916 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3917 res->type = STACK_OBJ;
3921 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3923 MONO_START_BB (cfg, end_bb);
3925 *out_cbb = cfg->cbb;
/* Non-gsharedvt, non-nullable: allocate and store the value. */
3929 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3933 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 * Return whether @klass has any variant (co- or contra-variant)
 * generic parameter instantiated with a reference type.  Such casts
 * need the full variance-aware runtime check rather than a simple
 * class comparison.
 */
3940 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3943 MonoGenericContainer *container;
3944 MonoGenericInst *ginst;
3946 if (klass->generic_class) {
3947 container = klass->generic_class->container_class->generic_container;
3948 ginst = klass->generic_class->context.class_inst;
3949 } else if (klass->generic_container && context_used) {
3950 container = klass->generic_container;
3951 ginst = container->context.class_inst;
3956 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant type parameters. */
3958 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3960 type = ginst->type_argv [i];
3961 if (mini_type_is_reference (cfg, type))
3967 // FIXME: This doesn't work yet (class libs tests fail?)
/* NOTE(review): the leading TRUE makes this macro always evaluate to TRUE,
 * so every castclass/isinst currently takes the "complex" helper-call path;
 * the remaining conditions are dead until the FIXME above is resolved. */
3968 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3971 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 * Emit the CIL castclass check of @src against @klass.  Complex casts
 * (variant generic arguments, and currently everything matched by
 * is_complex_isinst) go through the cached managed castclass helper;
 * simpler cases are open-coded: interface check, direct vtable/class
 * comparison for sealed classes, or the general
 * mini_emit_castclass_inst path.  A null object always passes.
 */
3974 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3976 MonoBasicBlock *is_null_bb;
3977 int obj_reg = src->dreg;
3978 int vtable_reg = alloc_preg (cfg);
3979 MonoInst *klass_inst = NULL;
3984 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3985 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3986 MonoInst *cache_ins;
3988 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3993 /* klass - it's the second element of the cache entry*/
3994 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3997 args [2] = cache_ins;
3999 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
4002 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4005 NEW_BBLOCK (cfg, is_null_bb);
/* null always satisfies castclass. */
4007 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4010 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4012 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4013 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4014 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4016 int klass_reg = alloc_preg (cfg);
4018 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4020 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4021 /* the remoting code is broken, access the class for now */
4022 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4023 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4025 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4026 cfg->exception_ptr = klass;
4029 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: a single pointer compare of the MonoClass suffices. */
4031 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4032 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4034 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4036 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4037 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4041 MONO_START_BB (cfg, is_null_bb);
4043 reset_cast_details (cfg);
4049 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 * Emit the CIL isinst test of @src against @klass, producing either
 * the object itself or NULL.  Complex cases go through the cached
 * managed isinst helper; otherwise open-coded branchy checks are
 * emitted for interfaces, arrays (rank + element-class comparison,
 * including the enum/object special cases), nullable types, sealed
 * classes, and the general subclass walk.
 */
4052 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4055 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4056 int obj_reg = src->dreg;
4057 int vtable_reg = alloc_preg (cfg);
4058 int res_reg = alloc_ireg_ref (cfg);
4059 MonoInst *klass_inst = NULL;
4064 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4065 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4066 MonoInst *cache_ins;
4068 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4073 /* klass - it's the second element of the cache entry*/
4074 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4077 args [2] = cache_ins;
4079 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4082 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4085 NEW_BBLOCK (cfg, is_null_bb);
4086 NEW_BBLOCK (cfg, false_bb);
4087 NEW_BBLOCK (cfg, end_bb);
4089 /* Do the assignment at the beginning, so the other assignment can be if converted */
4090 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4091 ins->type = STACK_OBJ;
4094 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4095 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4097 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4099 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4100 g_assert (!context_used);
4101 /* the is_null_bb target simply copies the input register to the output */
4102 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4104 int klass_reg = alloc_preg (cfg);
4107 int rank_reg = alloc_preg (cfg);
4108 int eclass_reg = alloc_preg (cfg);
4110 g_assert (!context_used);
/* Array case: ranks must match, then the element classes are compared. */
4111 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4112 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4113 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
4116 if (klass->cast_class == mono_defaults.object_class) {
4117 int parent_reg = alloc_preg (cfg);
4118 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4119 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4120 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4121 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4122 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4123 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4124 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4125 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4126 } else if (klass->cast_class == mono_defaults.enum_class) {
4127 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4128 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4129 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4130 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4132 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4133 /* Check that the object is a vector too */
4134 int bounds_reg = alloc_preg (cfg);
4135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4137 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4140 /* the is_null_bb target simply copies the input register to the output */
4141 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4143 } else if (mono_class_is_nullable (klass)) {
4144 g_assert (!context_used);
4145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4146 /* the is_null_bb target simply copies the input register to the output */
4147 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4149 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4150 g_assert (!context_used);
4151 /* the remoting code is broken, access the class for now */
4152 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4153 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4155 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4156 cfg->exception_ptr = klass;
/* Sealed class: a single pointer compare decides the result. */
4159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4161 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4162 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4164 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4165 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4167 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4168 /* the is_null_bb target simply copies the input register to the output */
4169 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4174 MONO_START_BB (cfg, false_bb);
/* Test failed: the result is NULL. */
4176 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4177 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4179 MONO_START_BB (cfg, is_null_bb);
4181 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Open-code the remoting-aware isinst opcode.  See the comment below
 * for the 0/1/2 result encoding; the 2 ("proxy whose type cannot be
 * determined") result only exists when remoting support is compiled in.
 */
4187 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4189 /* This opcode takes as input an object reference and a class, and returns:
4190 0) if the object is an instance of the class,
4191 1) if the object is not instance of the class,
4192 2) if the object is a proxy whose type cannot be determined */
4195 #ifndef DISABLE_REMOTING
4196 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4198 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4200 int obj_reg = src->dreg;
4201 int dreg = alloc_ireg (cfg);
4203 #ifndef DISABLE_REMOTING
4204 int klass_reg = alloc_preg (cfg);
4207 NEW_BBLOCK (cfg, true_bb);
4208 NEW_BBLOCK (cfg, false_bb);
4209 NEW_BBLOCK (cfg, end_bb);
4210 #ifndef DISABLE_REMOTING
4211 NEW_BBLOCK (cfg, false2_bb);
4212 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance of anything. */
4215 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4216 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4218 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4219 #ifndef DISABLE_REMOTING
4220 NEW_BBLOCK (cfg, interface_fail_bb);
4223 tmp_reg = alloc_preg (cfg);
4224 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4225 #ifndef DISABLE_REMOTING
/* Interface check failed: it may still be a transparent proxy with
 * custom type info, which yields result 2. */
4226 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4227 MONO_START_BB (cfg, interface_fail_bb);
4228 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4230 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4232 tmp_reg = alloc_preg (cfg);
4233 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4234 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4235 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4237 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4240 #ifndef DISABLE_REMOTING
4241 tmp_reg = alloc_preg (cfg);
4242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4243 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4245 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class's proxy_class. */
4246 tmp_reg = alloc_preg (cfg);
4247 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4248 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4250 tmp_reg = alloc_preg (cfg);
4251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4252 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4253 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4255 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4256 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4258 MONO_START_BB (cfg, no_proxy_bb);
4260 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4262 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4266 MONO_START_BB (cfg, false_bb);
4268 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4269 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4271 #ifndef DISABLE_REMOTING
4272 MONO_START_BB (cfg, false2_bb);
4274 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4275 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4278 MONO_START_BB (cfg, true_bb);
4280 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4282 MONO_START_BB (cfg, end_bb);
/* Result instruction is typed as I4 on the evaluation stack. */
4285 MONO_INST_NEW (cfg, ins, OP_ICONST);
4287 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Open-code the remoting-aware castclass opcode.  See the comment
 * below for the 0/1 result encoding; a definite type mismatch throws
 * InvalidCastException, and the 1 ("undecidable proxy") result only
 * exists when remoting support is compiled in.
 */
4293 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4295 /* This opcode takes as input an object reference and a class, and returns:
4296 0) if the object is an instance of the class,
4297 1) if the object is a proxy whose type cannot be determined
4298 an InvalidCastException exception is thrown otherwhise*/
4301 #ifndef DISABLE_REMOTING
4302 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4304 MonoBasicBlock *ok_result_bb;
4306 int obj_reg = src->dreg;
4307 int dreg = alloc_ireg (cfg);
4308 int tmp_reg = alloc_preg (cfg);
4310 #ifndef DISABLE_REMOTING
4311 int klass_reg = alloc_preg (cfg);
4312 NEW_BBLOCK (cfg, end_bb);
4315 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully. */
4317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4318 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4320 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4322 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4323 #ifndef DISABLE_REMOTING
4324 NEW_BBLOCK (cfg, interface_fail_bb);
4326 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4327 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4328 MONO_START_BB (cfg, interface_fail_bb);
4329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not the interface: only a transparent proxy carrying custom type
 * info may proceed (result 1); anything else throws. */
4331 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4333 tmp_reg = alloc_preg (cfg);
4334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4335 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4336 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4338 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4339 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4341 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4342 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4343 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4346 #ifndef DISABLE_REMOTING
4347 NEW_BBLOCK (cfg, no_proxy_bb);
4349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4351 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class's proxy_class. */
4353 tmp_reg = alloc_preg (cfg);
4354 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4355 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4357 tmp_reg = alloc_preg (cfg);
4358 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4359 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4360 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4362 NEW_BBLOCK (cfg, fail_1_bb);
4364 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4366 MONO_START_BB (cfg, fail_1_bb);
4368 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4369 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4371 MONO_START_BB (cfg, no_proxy_bb);
4373 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4375 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4379 MONO_START_BB (cfg, ok_result_bb);
4381 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4383 #ifndef DISABLE_REMOTING
4384 MONO_START_BB (cfg, end_bb);
/* Result instruction is typed as I4 on the evaluation stack. */
4388 MONO_INST_NEW (cfg, ins, OP_ICONST);
4390 ins->type = STACK_I4;
4396 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 * Allocate a delegate of @klass and inline the work of
 * mono_delegate_ctor: store the target object and the MonoMethod (with
 * write barriers when needed), cache a per-domain code slot so the
 * delegate trampoline can find the compiled code later, and install
 * the delegate invoke trampoline.  The remaining checks are performed
 * by the trampoline itself.
 */
4398 static G_GNUC_UNUSED MonoInst*
4399 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4403 gpointer *trampoline;
4404 MonoInst *obj, *method_ins, *tramp_ins;
4408 obj = handle_alloc (cfg, klass, FALSE, 0);
4412 /* Inline the contents of mono_delegate_ctor */
4414 /* Set target field */
4415 /* Optimize away setting of NULL target */
4416 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4417 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4418 if (cfg->gen_write_barriers) {
4419 dreg = alloc_preg (cfg);
4420 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4421 emit_write_barrier (cfg, ptr, target);
4425 /* Set method field */
4426 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4427 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4428 if (cfg->gen_write_barriers) {
4429 dreg = alloc_preg (cfg);
4430 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4431 emit_write_barrier (cfg, ptr, method_ins);
4434 * To avoid looking up the compiled code belonging to the target method
4435 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4436 * store it, and we fill it after the method has been compiled.
4438 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4439 MonoInst *code_slot_ins;
4442 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The code slot is shared per (domain, method); create it on demand
 * under the domain lock. */
4444 domain = mono_domain_get ();
4445 mono_domain_lock (domain);
4446 if (!domain_jit_info (domain)->method_code_hash)
4447 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4448 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4450 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4451 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4453 mono_domain_unlock (domain);
4455 if (cfg->compile_aot)
4456 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4458 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4460 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4463 /* Set invoke_impl field */
4464 if (cfg->compile_aot) {
4465 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4467 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4468 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4470 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4472 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit the allocation of a multi-dimensional array of rank @rank, with
 * the dimension arguments taken from @sp, through the vararg
 * mono_array_new_va icall (which forces LLVM off for this method).
 */
4478 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4480 MonoJitICallInfo *info;
4482 /* Need to register the icall so it gets an icall wrapper */
4483 info = mono_get_array_new_va_icall (rank);
4485 cfg->flags |= MONO_CFG_HAS_VARARGS;
4487 /* mono_array_new_va () needs a vararg calling convention */
4488 cfg->disable_llvm = TRUE;
4490 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4491 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * Emit the OP_LOAD_GOTADDR instruction which materializes the GOT
 * address into got_var, inserting it at the very start of the entry
 * basic block, and add a dummy use in the exit block so liveness
 * analysis keeps got_var alive for the whole method.  A no-op if there
 * is no got_var or it was already allocated.
 */
4495 mono_emit_load_got_addr (MonoCompile *cfg)
4497 MonoInst *getaddr, *dummy_use;
4499 if (!cfg->got_var || cfg->got_var_allocated)
4502 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4503 getaddr->cil_code = cfg->header->code;
4504 getaddr->dreg = cfg->got_var->dreg;
4506 /* Add it to the start of the first bblock */
4507 if (cfg->bb_entry->code) {
4508 getaddr->next = cfg->bb_entry->code;
4509 cfg->bb_entry->code = getaddr;
4512 MONO_ADD_INS (cfg->bb_entry, getaddr);
4514 cfg->got_var_allocated = TRUE;
4517 * Add a dummy use to keep the got_var alive, since real uses might
4518 * only be generated by the back ends.
4519 * Add it to end_bblock, so the variable's lifetime covers the whole
4521 * It would be better to make the usage of the got var explicit in all
4522 * cases when the backend needs it (i.e. calls, throw etc.), so this
4523 * wouldn't be needed.
4525 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4526 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit in IL bytes; initialized lazily in
 * mono_method_check_inlining () from the MONO_INLINELIMIT environment
 * variable, falling back to INLINE_LENGTH_LIMIT. */
4529 static int inline_limit;
4530 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD is eligible for inlining into the method being
 *   compiled in CFG.  Rejects generic sharing contexts, deep inline nesting,
 *   noinline/synchronized/MarshalByRef methods, bodies at or over the
 *   size limit (unless AggressiveInlining), methods whose class cctor
 *   cannot be handled here, methods with declarative security, and (under
 *   soft-float) methods with R4 parameters or return.
 *   NOTE(review): several lines (returns, braces, and apparently the
 *   declarations of `vtable` and `signature`) were dropped by the
 *   extraction; the actual TRUE/FALSE returns are not visible here.
 */
4533 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4535 MonoMethodHeaderSummary header;
4537 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4538 MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining while sharing generic code. */
4542 if (cfg->generic_sharing_context)
/* Cap the inline nesting depth. */
4545 if (cfg->inline_depth > 10)
4548 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): `signature` here is not declared in the visible lines —
 * presumably a dropped declaration; confirm against the full file. */
4549 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4550 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4551 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4556 if (!mono_method_get_header_summary (method, &header))
4559 /*runtime, icall and pinvoke are checked by summary call*/
4560 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4561 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4562 (mono_class_is_marshalbyref (method->klass)) ||
4566 /* also consider num_locals? */
4567 /* Do the size check early to avoid creating vtables */
/* Lazy one-time init of the IL-size threshold from MONO_INLINELIMIT. */
4568 if (!inline_limit_inited) {
4569 if (g_getenv ("MONO_INLINELIMIT"))
4570 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4572 inline_limit = INLINE_LENGTH_LIMIT;
4573 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
4575 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4579 * if we can initialize the class of the method right away, we do,
4580 * otherwise we don't allow inlining if the class needs initialization,
4581 * since it would mean inserting a call to mono_runtime_class_init()
4582 * inside the inlined code
4584 if (!(cfg->opt & MONO_OPT_SHARED)) {
4585 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4586 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4587 vtable = mono_class_vtable (cfg->domain, method->klass);
/* Under AOT a pending cctor cannot be forced at compile time. */
4590 if (cfg->compile_aot && mono_class_needs_cctor_run (method->klass, NULL))
4592 mono_runtime_class_init (vtable);
4593 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4594 if (cfg->run_cctors && method->klass->has_cctor) {
4595 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4596 if (!method->klass->runtime_info)
4597 /* No vtable created yet */
4599 vtable = mono_class_vtable (cfg->domain, method->klass);
4602 /* This makes so that inline cannot trigger */
4603 /* .cctors: too many apps depend on them */
4604 /* running with a specific order... */
4605 if (! vtable->initialized)
4607 mono_runtime_class_init (vtable);
4609 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4610 if (!method->klass->runtime_info)
4611 /* No vtable created yet */
4613 vtable = mono_class_vtable (cfg->domain, method->klass);
4616 if (!vtable->initialized)
4621 * If we're compiling for shared code
4622 * the cctor will need to be run at aot method load time, for example,
4623 * or at the end of the compilation of the inlining method.
4625 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4630 * CAS - do not inline methods with declarative security
4631 * Note: this has to be before any possible return TRUE;
4633 if (mono_security_method_has_declsec (method))
4636 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float: R4 values need fallback handling, so refuse to inline. */
4637 if (mono_arch_is_soft_float ()) {
4639 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4641 for (i = 0; i < sig->param_count; ++i)
4642 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Decide whether a static-field access compiled into METHOD must be
 *   preceded by a class-initialization call for KLASS.  Fast-outs when the
 *   vtable is already initialized (JIT only), for BeforeFieldInit classes
 *   accessed from the root method, when no cctor run is needed, and when
 *   the access is from an instance method of the same class (the cctor has
 *   necessarily run before such a method can execute).
 *   NOTE(review): return statements were dropped by the extraction.
 */
4651 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
/* AOT cannot rely on current-process init state, hence the !compile_aot guard. */
4653 if (!cfg->compile_aot) {
4655 if (vtable->initialized)
4659 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4660 if (cfg->method == method)
4664 if (!mono_class_needs_cctor_run (klass, method))
4667 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4668 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element INDEX of the one-dimensional
 *   array ARR of element type KLASS, optionally with a bounds check
 *   (BCHECK).  Uses an x86/amd64 LEA fast path for power-of-two element
 *   sizes, and a gsharedvt path that fetches the element size from the
 *   RGCTX at run time.  Returns an instruction of type STACK_MP.
 */
4675 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4679 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* Variable-size (gsharedvt) element types take the RGCTX-based path below. */
4682 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4685 mono_class_init (klass);
4686 size = mono_class_array_element_size (klass);
4689 mult_reg = alloc_preg (cfg);
4690 array_reg = arr->dreg;
4691 index_reg = index->dreg;
4693 #if SIZEOF_REGISTER == 8
4694 /* The array reg is 64 bits but the index reg is only 32 */
4695 if (COMPILE_LLVM (cfg)) {
4697 index2_reg = index_reg;
4699 index2_reg = alloc_preg (cfg);
4700 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index to I4 first. */
4703 if (index->type == STACK_I8) {
4704 index2_reg = alloc_preg (cfg);
4705 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4707 index2_reg = index_reg;
/* bcheck path (guard dropped by extraction): compare against max_length. */
4712 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4714 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale into a single LEA for sizes 1/2/4/8. */
4715 if (size == 1 || size == 2 || size == 4 || size == 8) {
4716 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4718 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4719 ins->klass = mono_class_get_element_class (klass);
4720 ins->type = STACK_MP;
4726 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: element size is only known at run time, read it from the RGCTX. */
4729 MonoInst *rgctx_ins;
4732 g_assert (cfg->generic_sharing_context);
4733 context_used = mini_class_check_context_used (cfg, klass);
4734 g_assert (context_used);
4735 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4736 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4738 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4740 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4741 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4742 ins->klass = mono_class_get_element_class (klass);
4743 ins->type = STACK_MP;
4744 MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [INDEX1, INDEX2] of a
 *   two-dimensional array: load both MonoArrayBounds entries, rebase each
 *   index by its lower bound, range-check against each length, then
 *   compute  (realidx1 * length2 + realidx2) * size + vector offset.
 *   Only compiled when the arch has native multiply (guarded above).
 */
4751 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4753 int bounds_reg = alloc_preg (cfg);
4754 int add_reg = alloc_ireg_mp (cfg);
4755 int mult_reg = alloc_preg (cfg);
4756 int mult2_reg = alloc_preg (cfg);
4757 int low1_reg = alloc_preg (cfg);
4758 int low2_reg = alloc_preg (cfg);
4759 int high1_reg = alloc_preg (cfg);
4760 int high2_reg = alloc_preg (cfg);
4761 int realidx1_reg = alloc_preg (cfg);
4762 int realidx2_reg = alloc_preg (cfg);
4763 int sum_reg = alloc_preg (cfg);
4764 int index1, index2, tmpreg;
4768 mono_class_init (klass);
4769 size = mono_class_array_element_size (klass);
4771 index1 = index_ins1->dreg;
4772 index2 = index_ins2->dreg;
4774 #if SIZEOF_REGISTER == 8
4775 /* The array reg is 64 bits but the index reg is only 32 */
4776 if (COMPILE_LLVM (cfg)) {
/* Non-LLVM 64-bit path: sign-extend both 32-bit indexes. */
4779 tmpreg = alloc_preg (cfg);
4780 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4782 tmpreg = alloc_preg (cfg);
4783 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4787 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4791 /* range checking */
4792 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4793 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: rebase by lower_bound, check against length. */
4795 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4796 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4797 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4798 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4799 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4800 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4801 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: the bounds entry lives sizeof (MonoArrayBounds) further. */
4803 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4804 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4805 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4806 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4807 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4808 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4809 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof vector. */
4811 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4812 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4814 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4815 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4817 ins->type = STACK_MP;
4819 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch for array element-address computation: rank 1 goes through
 *   mini_emit_ldelema_1_ins, rank 2 through mini_emit_ldelema_2_ins (when
 *   OP_LMUL is available and intrinsics are on), and anything else through
 *   a marshalled Address() helper method.  IS_SET adjusts the rank because
 *   a setter signature carries the value as an extra parameter.
 */
4826 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4830 MonoMethod *addr_method;
4833 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4836 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4838 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4839 /* emit_ldelema_2 depends on OP_LMUL */
4840 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4841 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address() wrapper for this rank/size. */
4845 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4846 addr_method = mono_marshal_get_array_address (rank, element_size);
4847 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor break IL instructions and
 * Debugger.Break () calls. */
static MonoBreakPolicy
4853 always_insert_breakpoint (MonoMethod *method)
4855 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy hook; embedders override it via mono_set_break_policy (). */
4858 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4861 * mono_set_break_policy:
4862 * policy_callback: the new callback function
4864 * Allow embedders to decide whether to actually obey breakpoint instructions
4865 * (both break IL instructions and Debugger.Break () method calls), for example
4866 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4867 * untrusted or semi-trusted code.
4869 * @policy_callback will be called every time a break point instruction needs to
4870 * be inserted with the method argument being the method that calls Debugger.Break()
4871 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4872 * if it wants the breakpoint to not be effective in the given method.
4873 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; a NULL argument restores the
 * default always-break behavior.  See the doc comment above. */
4876 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4878 if (policy_callback)
4879 break_policy_func = policy_callback;
4881 break_policy_func = always_insert_breakpoint;
/* Ask the installed break policy whether METHOD should get a real
 * breakpoint.  (Name misspelling "brekpoint" is historical; callers in
 * this file use it, so it cannot be renamed here.)
 * NOTE(review): the return statements for each case were dropped by the
 * extraction. */
should_insert_brekpoint (MonoMethod *method) {
4886 switch (break_policy_func (method)) {
4887 case MONO_BREAK_POLICY_ALWAYS:
4889 case MONO_BREAK_POLICY_NEVER:
4891 case MONO_BREAK_POLICY_ON_DBG:
4892 g_warning ("mdb no longer supported");
4895 g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   Inline the Get/SetGenericValueImpl icalls: compute the element address
 *   (bounds check already performed by the caller), then copy between the
 *   element and the location given by args [2].  For a set of a reference
 *   type, a write barrier is emitted for the stored value.
 */
4902 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4904 MonoInst *addr, *store, *load;
4905 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4907 /* the bounds check is already done by the callers */
4908 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set branch (guard dropped by extraction): value -> element. */
4910 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4911 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4912 if (mini_type_is_reference (cfg, fsig->params [2]))
4913 emit_write_barrier (cfg, addr, load);
/* get branch: element -> destination. */
4915 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4916 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* True if KLASS is a reference type in the current (possibly generic-shared)
 * compilation context; thin wrapper over mini_type_is_reference (). */
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4925 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit IR for a stelem-style array store.  Reference-type stores of a
 *   possibly non-null value go through the virtual stelemref helper (which
 *   performs the array covariance check); gsharedvt element types use a
 *   computed address plus OP_STOREV_MEMBASE; a constant index gets an
 *   inlined bounds check and direct offset store; everything else computes
 *   the element address and stores through it, with a write barrier for
 *   reference elements.
 */
emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a maybe-non-null reference needs the covariance-checking helper. */
4931 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4932 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4933 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4934 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4935 MonoInst *iargs [3];
4938 mono_class_setup_vtable (obj_array);
4939 g_assert (helper->slot);
4941 if (sp [0]->type != STACK_OBJ)
4943 if (sp [2]->type != STACK_OBJ)
4950 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt: element size unknown at compile time, use OP_STOREV_MEMBASE. */
4954 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4957 // FIXME-VT: OP_ICONST optimization
4958 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4959 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4960 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset at compile time. */
4961 } else if (sp [1]->opcode == OP_ICONST) {
4962 int array_reg = sp [0]->dreg;
4963 int index_reg = sp [1]->dreg;
4964 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4967 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4968 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4970 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4971 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4972 if (generic_class_is_reference_type (cfg, klass))
4973 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Inline Array.UnsafeStore/UnsafeLoad: the element class comes from the
 *   third parameter (store) or the return type (load).  Stores reuse
 *   emit_array_store without safety checks; loads compute the element
 *   address (no bounds check) and load through it.
 */
emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4985 eklass = mono_class_from_mono_type (fsig->params [2]);
4987 eklass = mono_class_from_mono_type (fsig->ret);
4991 return emit_array_store (cfg, eklass, args, FALSE);
4993 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4994 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic hook for constructor calls: try the SIMD intrinsics first
 *   (when MONO_OPT_SIMD is on and the arch supports them), then fall back
 *   to the native-types intrinsics.  Returns NULL when no intrinsic applies.
 */
5000 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5002 #ifdef MONO_ARCH_SIMD_INTRINSICS
5003 MonoInst *ins = NULL;
5005 if (cfg->opt & MONO_OPT_SIMD) {
5006 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5012 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) into the
 * current basic block and return it (return dropped by extraction). */
emit_memory_barrier (MonoCompile *cfg, int kind)
5018 MonoInst *ins = NULL;
5019 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5020 MONO_ADD_INS (cfg->cbb, ins);
5021 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsics usable only under the LLVM backend: Math.Sin/Cos/Sqrt/Abs
 *   become single R8 unary ops, and Math.Min/Max become conditional-move
 *   min/max ops (guarded by MONO_OPT_CMOV) for I4/U4/I8/U8 operands.
 *   Returns NULL when nothing matches.
 */
llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5029 MonoInst *ins = NULL;
5032 /* The LLVM backend supports these intrinsics */
5033 if (cmethod->klass == mono_defaults.math_class) {
/* Opcode selection lines were dropped by the extraction; each branch
 * presumably sets `opcode` to the matching OP_* — confirm in full file. */
5034 if (strcmp (cmethod->name, "Sin") == 0) {
5036 } else if (strcmp (cmethod->name, "Cos") == 0) {
5038 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5040 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5045 MONO_INST_NEW (cfg, ins, opcode);
5046 ins->type = STACK_R8;
5047 ins->dreg = mono_alloc_freg (cfg);
5048 ins->sreg1 = args [0]->dreg;
5049 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map to cmov-style min/max opcodes, per operand type. */
5053 if (cfg->opt & MONO_OPT_CMOV) {
5054 if (strcmp (cmethod->name, "Min") == 0) {
5055 if (fsig->params [0]->type == MONO_TYPE_I4)
5057 if (fsig->params [0]->type == MONO_TYPE_U4)
5058 opcode = OP_IMIN_UN;
5059 else if (fsig->params [0]->type == MONO_TYPE_I8)
5061 else if (fsig->params [0]->type == MONO_TYPE_U8)
5062 opcode = OP_LMIN_UN;
5063 } else if (strcmp (cmethod->name, "Max") == 0) {
5064 if (fsig->params [0]->type == MONO_TYPE_I4)
5066 if (fsig->params [0]->type == MONO_TYPE_U4)
5067 opcode = OP_IMAX_UN;
5068 else if (fsig->params [0]->type == MONO_TYPE_I8)
5070 else if (fsig->params [0]->type == MONO_TYPE_U8)
5071 opcode = OP_LMAX_UN;
5076 MONO_INST_NEW (cfg, ins, opcode);
5077 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5078 ins->dreg = mono_alloc_ireg (cfg);
5079 ins->sreg1 = args [0]->dreg;
5080 ins->sreg2 = args [1]->dreg;
5081 MONO_ADD_INS (cfg->cbb, ins);
/* Intrinsics safe under generic sharing: only Array.UnsafeStore/UnsafeLoad,
 * routed through emit_array_unsafe_access.  Returns NULL otherwise. */
mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5091 if (cmethod->klass == mono_defaults.array_class) {
5092 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5093 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5094 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5095 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *   The main intrinsic dispatcher: try to replace a call to CMETHOD with an
 *   inline IR sequence.  Handles String, Object, Array, RuntimeHelpers,
 *   Thread, Monitor, Interlocked, Debugger.Break, Environment,
 *   Selector.GetHandle (MonoMac/monotouch AOT), then falls through to the
 *   SIMD, native-types, LLVM and arch-specific intrinsic hooks.  Returns
 *   the emitted instruction or NULL when no intrinsic applies.
 */
5102 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5104 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
5106 static MonoClass *runtime_helpers_class = NULL;
5107 if (! runtime_helpers_class)
5108 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5109 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
5111 if (cmethod->klass == mono_defaults.string_class) {
/* get_Chars: bounds-checked 16-bit load from the chars array. */
5112 if (strcmp (cmethod->name, "get_Chars") == 0) {
5113 int dreg = alloc_ireg (cfg);
5114 int index_reg = alloc_preg (cfg);
5115 int mult_reg = alloc_preg (cfg);
5116 int add_reg = alloc_preg (cfg);
5118 #if SIZEOF_REGISTER == 8
5119 /* The array reg is 64 bits but the index reg is only 32 */
5120 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5122 index_reg = args [1]->dreg;
5124 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5126 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5127 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5128 add_reg = ins->dreg;
5129 /* Avoid a warning */
5131 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: addr = str + index * 2 + offsetof chars. */
5134 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5135 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5136 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5137 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5139 type_from_op (ins, NULL, NULL);
5141 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5142 int dreg = alloc_ireg (cfg);
5143 /* Decompose later to allow more optimizations */
5144 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5145 ins->type = STACK_I4;
5146 ins->flags |= MONO_INST_FAULT;
5147 cfg->cbb->has_array_access = TRUE;
5148 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5151 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5152 int mult_reg = alloc_preg (cfg);
5153 int add_reg = alloc_preg (cfg);
5155 /* The corlib functions check for oob already. */
5156 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5157 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5158 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5159 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
5162 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: load vtable, then the MonoVTable.type field. */
5164 if (strcmp (cmethod->name, "GetType") == 0) {
5165 int dreg = alloc_ireg_ref (cfg);
5166 int vt_reg = alloc_preg (cfg);
5167 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5168 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5169 type_from_op (ins, NULL, NULL);
5172 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Address-based hash; only valid with a non-moving GC. */
5173 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5174 int dreg = alloc_ireg (cfg);
5175 int t1 = alloc_ireg (cfg);
5177 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5178 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5179 ins->type = STACK_I4;
/* Object..ctor does nothing — replace with a NOP. */
5183 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5184 MONO_INST_NEW (cfg, ins, OP_NOP);
5185 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
5189 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl by suffix. */
5190 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5191 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5193 #ifndef MONO_BIG_ARRAYS
5195 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5198 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5199 int dreg = alloc_ireg (cfg);
5200 int bounds_reg = alloc_ireg_mp (cfg);
5201 MonoBasicBlock *end_bb, *szarray_bb;
5202 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5204 NEW_BBLOCK (cfg, end_bb);
5205 NEW_BBLOCK (cfg, szarray_bb);
/* Branch on bounds == NULL to distinguish szarray from md-array. */
5207 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5208 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5209 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5210 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5211 /* Non-szarray case */
5213 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5214 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5216 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5217 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5218 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5219 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength -> max_length, GetLowerBound(0) -> 0. */
5222 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5223 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5225 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5226 MONO_START_BB (cfg, end_bb);
5228 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5229 ins->type = STACK_I4;
/* Only getters remain below; bail out early otherwise. */
5235 if (cmethod->name [0] != 'g')
5238 if (strcmp (cmethod->name, "get_Rank") == 0) {
5239 int dreg = alloc_ireg (cfg);
5240 int vtable_reg = alloc_preg (cfg);
5241 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5242 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5243 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5244 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5245 type_from_op (ins, NULL, NULL);
5248 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5249 int dreg = alloc_ireg (cfg);
5251 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5252 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5253 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
5258 } else if (cmethod->klass == runtime_helpers_class) {
5260 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5261 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
5265 } else if (cmethod->klass == mono_defaults.thread_class) {
5266 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5267 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5268 MONO_ADD_INS (cfg->cbb, ins);
5270 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5271 return emit_memory_barrier (cfg, FullBarrier);
/* --- System.Threading.Monitor fast paths --- */
5273 } else if (cmethod->klass == mono_defaults.monitor_class) {
5275 /* FIXME this should be integrated to the check below once we support the trampoline version */
5276 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5277 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5278 MonoMethod *fast_method = NULL;
5280 /* Avoid infinite recursion */
5281 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5284 fast_method = mono_monitor_get_fast_path (cmethod);
5288 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* Trampoline-based Enter/Exit: pass the object in a fixed register
 * (except under LLVM, which handles the calling convention itself). */
5292 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5293 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5296 if (COMPILE_LLVM (cfg)) {
5298 * Pass the argument normally, the LLVM backend will handle the
5299 * calling convention problems.
5301 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5303 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5304 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5305 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5306 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5309 return (MonoInst*)call;
5310 } else if (strcmp (cmethod->name, "Exit") == 0) {
5313 if (COMPILE_LLVM (cfg)) {
5314 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5316 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5317 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5318 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5319 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5322 return (MonoInst*)call;
5324 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5326 MonoMethod *fast_method = NULL;
5328 /* Avoid infinite recursion */
5329 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5330 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5331 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5334 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5335 strcmp (cmethod->name, "Exit") == 0)
5336 fast_method = mono_monitor_get_fast_path (cmethod);
5340 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
5343 } else if (cmethod->klass->image == mono_defaults.corlib &&
5344 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5345 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5348 #if SIZEOF_REGISTER == 8
5349 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5350 /* 64 bit reads are already atomic */
5351 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5352 ins->dreg = mono_alloc_preg (cfg);
5353 ins->inst_basereg = args [0]->dreg;
5354 ins->inst_offset = 0;
5355 MONO_ADD_INS (cfg->cbb, ins);
5359 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add all lower to OP_ATOMIC_ADD_NEW_{I4,I8} with the
 * appropriate constant (or second argument) as the addend. */
5360 if (strcmp (cmethod->name, "Increment") == 0) {
5361 MonoInst *ins_iconst;
5364 if (fsig->params [0]->type == MONO_TYPE_I4) {
5365 opcode = OP_ATOMIC_ADD_NEW_I4;
5366 cfg->has_atomic_add_new_i4 = TRUE;
5368 #if SIZEOF_REGISTER == 8
5369 else if (fsig->params [0]->type == MONO_TYPE_I8)
5370 opcode = OP_ATOMIC_ADD_NEW_I8;
5373 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5374 ins_iconst->inst_c0 = 1;
5375 ins_iconst->dreg = mono_alloc_ireg (cfg);
5376 MONO_ADD_INS (cfg->cbb, ins_iconst);
5378 MONO_INST_NEW (cfg, ins, opcode);
5379 ins->dreg = mono_alloc_ireg (cfg);
5380 ins->inst_basereg = args [0]->dreg;
5381 ins->inst_offset = 0;
5382 ins->sreg2 = ins_iconst->dreg;
5383 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5384 MONO_ADD_INS (cfg->cbb, ins);
5386 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5387 MonoInst *ins_iconst;
5390 if (fsig->params [0]->type == MONO_TYPE_I4) {
5391 opcode = OP_ATOMIC_ADD_NEW_I4;
5392 cfg->has_atomic_add_new_i4 = TRUE;
5394 #if SIZEOF_REGISTER == 8
5395 else if (fsig->params [0]->type == MONO_TYPE_I8)
5396 opcode = OP_ATOMIC_ADD_NEW_I8;
5399 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5400 ins_iconst->inst_c0 = -1;
5401 ins_iconst->dreg = mono_alloc_ireg (cfg);
5402 MONO_ADD_INS (cfg->cbb, ins_iconst);
5404 MONO_INST_NEW (cfg, ins, opcode);
5405 ins->dreg = mono_alloc_ireg (cfg);
5406 ins->inst_basereg = args [0]->dreg;
5407 ins->inst_offset = 0;
5408 ins->sreg2 = ins_iconst->dreg;
5409 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5410 MONO_ADD_INS (cfg->cbb, ins);
5412 } else if (strcmp (cmethod->name, "Add") == 0) {
5415 if (fsig->params [0]->type == MONO_TYPE_I4) {
5416 opcode = OP_ATOMIC_ADD_NEW_I4;
5417 cfg->has_atomic_add_new_i4 = TRUE;
5419 #if SIZEOF_REGISTER == 8
5420 else if (fsig->params [0]->type == MONO_TYPE_I8)
5421 opcode = OP_ATOMIC_ADD_NEW_I8;
5425 MONO_INST_NEW (cfg, ins, opcode);
5426 ins->dreg = mono_alloc_ireg (cfg);
5427 ins->inst_basereg = args [0]->dreg;
5428 ins->inst_offset = 0;
5429 ins->sreg2 = args [1]->dreg;
5430 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5431 MONO_ADD_INS (cfg->cbb, ins);
5434 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5436 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5437 if (strcmp (cmethod->name, "Exchange") == 0) {
5439 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5441 if (fsig->params [0]->type == MONO_TYPE_I4) {
5442 opcode = OP_ATOMIC_EXCHANGE_I4;
5443 cfg->has_atomic_exchange_i4 = TRUE;
5445 #if SIZEOF_REGISTER == 8
5446 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5447 (fsig->params [0]->type == MONO_TYPE_I))
5448 opcode = OP_ATOMIC_EXCHANGE_I8;
5450 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5451 opcode = OP_ATOMIC_EXCHANGE_I4;
5452 cfg->has_atomic_exchange_i4 = TRUE;
5458 MONO_INST_NEW (cfg, ins, opcode);
5459 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5460 ins->inst_basereg = args [0]->dreg;
5461 ins->inst_offset = 0;
5462 ins->sreg2 = args [1]->dreg;
5463 MONO_ADD_INS (cfg->cbb, ins);
5465 switch (fsig->params [0]->type) {
5467 ins->type = STACK_I4;
5471 ins->type = STACK_I8;
5473 case MONO_TYPE_OBJECT:
5474 ins->type = STACK_OBJ;
5477 g_assert_not_reached ();
/* Storing a reference atomically still needs a write barrier. */
5480 if (cfg->gen_write_barriers && is_ref)
5481 emit_write_barrier (cfg, args [0], args [1]);
5483 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5485 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5486 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5488 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5489 if (fsig->params [1]->type == MONO_TYPE_I4)
5491 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5492 size = sizeof (gpointer);
5493 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
/* size == 4 branch (guard dropped by extraction): 32-bit CAS. */
5496 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5497 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5498 ins->sreg1 = args [0]->dreg;
5499 ins->sreg2 = args [1]->dreg;
5500 ins->sreg3 = args [2]->dreg;
5501 ins->type = STACK_I4;
5502 MONO_ADD_INS (cfg->cbb, ins);
5503 } else if (size == 8) {
5504 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5505 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5506 ins->sreg1 = args [0]->dreg;
5507 ins->sreg2 = args [1]->dreg;
5508 ins->sreg3 = args [2]->dreg;
5509 ins->type = STACK_I8;
5510 MONO_ADD_INS (cfg->cbb, ins);
5512 /* g_assert_not_reached (); */
5514 if (cfg->gen_write_barriers && is_ref)
5515 emit_write_barrier (cfg, args [0], args [1]);
5517 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5519 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5520 ins = emit_memory_barrier (cfg, FullBarrier);
/* --- other corlib types: Debugger.Break, Environment --- */
5524 } else if (cmethod->klass->image == mono_defaults.corlib) {
5525 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5526 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5527 if (should_insert_brekpoint (cfg->method)) {
5528 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5530 MONO_INST_NEW (cfg, ins, OP_NOP);
5531 MONO_ADD_INS (cfg->cbb, ins);
/* get_IsRunningOnWindows folds to a compile-time constant 1/0. */
5535 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5536 && strcmp (cmethod->klass->name, "Environment") == 0) {
5538 EMIT_NEW_ICONST (cfg, ins, 1);
5540 EMIT_NEW_ICONST (cfg, ins, 0);
5544 } else if (cmethod->klass == mono_defaults.math_class) {
5546 * There is general branches code for Min/Max, but it does not work for
5548 * http://everything2.com/?node_id=1051618
/* --- ObjC Selector.GetHandle intrinsic (MonoMac/monotouch, AOT only) ---
 * Resolve the selector string at compile time and emit an
 * OP_OBJC_GET_SELECTOR carrying the UTF-8 name. */
5550 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5551 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5553 MonoJumpInfoToken *ji;
5556 cfg->disable_llvm = TRUE;
5558 if (args [0]->opcode == OP_GOT_ENTRY) {
5559 pi = args [0]->inst_p1;
5560 g_assert (pi->opcode == OP_PATCH_INFO);
5561 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5564 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5565 ji = args [0]->inst_p0;
/* The original ldstr argument is no longer needed. */
5568 NULLIFY_INS (args [0]);
5571 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5572 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5573 ins->dreg = mono_alloc_ireg (cfg);
5575 ins->inst_p0 = mono_string_to_utf8 (s);
5576 MONO_ADD_INS (cfg->cbb, ins);
/* --- fallthrough intrinsic hooks: SIMD, native types, LLVM, arch --- */
5581 #ifdef MONO_ARCH_SIMD_INTRINSICS
5582 if (cfg->opt & MONO_OPT_SIMD) {
5583 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5589 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5593 if (COMPILE_LLVM (cfg)) {
5594 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5599 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5603 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to certain runtime-internal methods to a specialized
 * implementation.  In the visible code only String.InternalAllocateStr is
 * handled: when allocation profiling is off and shared (domain-neutral) code
 * is not being generated, the call is replaced by a direct call to the GC's
 * managed allocator with the string vtable as the first argument.
 * NOTE(review): some lines of this function are elided in this view; the
 * fall-through/return-NULL path is presumably in the missing tail — confirm.
 */
5606 inline static MonoInst*
5607 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5608 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5610 if (method->klass == mono_defaults.string_class) {
5611 /* managed string allocation support */
5612 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5613 MonoInst *iargs [2];
5614 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5615 MonoMethod *managed_alloc = NULL;
5617 g_assert (vtable); /* Should not fail since it is System.String */
5618 #ifndef MONO_CROSS_COMPILE
5619 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* call the managed allocator with (vtable, length) */
5623 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5624 iargs [1] = args [0];
5625 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the call arguments in SP into newly created local variables and
 * register those locals in cfg->args, so the inlined method body can access
 * its arguments through the usual argument-load machinery.  The implicit
 * 'this' argument (when sig->hasthis) gets its type from the stack entry.
 */
5632 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5634 MonoInst *store, *temp;
5637 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5638 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5641 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5642 * would be different than the MonoInst's used to represent arguments, and
5643 * the ldelema implementation can't deal with that.
5644 * Solution: When ldelema is used on an inline argument, create a var for
5645 * it, emit ldelema on that var, and emit the saving code below in
5646 * inline_method () if needed.
5648 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5649 cfg->args [i] = temp;
5650 /* This uses cfg->args [i] which is set by the preceding line */
5651 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5652 store->cil_code = sp [0]->cil_code;
5657 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5658 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5660 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: only allow inlining of CALLED_METHOD if its full name
 * starts with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The value is cached in a function-local static on
 * first use; an empty limit disables the filter.
 */
5662 check_inline_called_method_name_limit (MonoMethod *called_method)
5665 static const char *limit = NULL;
5667 if (limit == NULL) {
5668 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5670 if (limit_string != NULL)
5671 limit = limit_string;
5676 if (limit [0] != '\0') {
5677 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix match: compare only strlen (limit) characters */
5679 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5680 g_free (called_method_name);
5682 //return (strncmp_result <= 0);
5683 return (strncmp_result == 0);
5690 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug helper: only allow inlining inside CALLER_METHOD if its full name
 * starts with the prefix given in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  Mirrors check_inline_called_method_name_limit ().
 */
5692 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5695 static const char *limit = NULL;
5697 if (limit == NULL) {
5698 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5699 if (limit_string != NULL) {
5700 limit = limit_string;
5706 if (limit [0] != '\0') {
5707 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix match: compare only strlen (limit) characters */
5709 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5710 g_free (caller_method_name);
5712 //return (strncmp_result <= 0);
5713 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes vreg DREG to the zero/default value of RTYPE:
 * NULL for reference/pointer types, 0 for integral types, 0.0 for floats
 * (via a shared static r8_0 constant), and VZERO for value types (including
 * generic instances and type variables constrained to valuetypes).
 */
5721 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5723 static double r8_0 = 0.0;
5727 rtype = mini_replace_type (rtype);
5731 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5732 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5733 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5734 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5735 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5736 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
/* load 0.0 from the static constant; soft-float decomposition happens later */
5737 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5738 ins->type = STACK_R8;
5739 ins->inst_p0 = (void*)&r8_0;
5741 MONO_ADD_INS (cfg->cbb, ins);
5742 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5743 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5744 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5745 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5746 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* everything else (objects, pointers, byrefs) defaults to NULL */
5748 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_init_local:
 *
 *   Emit IR to zero-initialize local variable LOCAL of type TYPE.  Under
 * soft-float the value is materialized in a fresh vreg and then stored to
 * the local; otherwise the local's own dreg is initialized directly.
 */
5753 emit_init_local (MonoCompile *cfg, int local, MonoType *type)
5755 MonoInst *var = cfg->locals [local];
5756 if (COMPILE_SOFT_FLOAT (cfg)) {
5758 int reg = alloc_dreg (cfg, var->type);
5759 emit_init_rvar (cfg, reg, type);
5760 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5762 emit_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current point of compilation.  Saves the
 * relevant per-method compilation state (locals, args, cil offsets, basic
 * block map, generic context, ...), recursively runs mono_method_to_ir ()
 * on the callee's body between a fresh start/end bblock pair, then restores
 * the state.  On success (cost below threshold, or INLINE_ALWAYS) the new
 * bblocks are linked/merged into the caller's CFG and the return-value var
 * is loaded; on failure the newly created bblocks are discarded and the
 * cfg exception state is reset.  Returns the inline cost on success
 * (presumably 0/negative on failure — the final return lines are elided
 * in this view).
 */
5767 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5768 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5770 MonoInst *ins, *rvar = NULL;
5771 MonoMethodHeader *cheader;
5772 MonoBasicBlock *ebblock, *sbblock;
5774 MonoMethod *prev_inlined_method;
5775 MonoInst **prev_locals, **prev_args;
5776 MonoType **prev_arg_types;
5777 guint prev_real_offset;
5778 GHashTable *prev_cbb_hash;
5779 MonoBasicBlock **prev_cil_offset_to_bb;
5780 MonoBasicBlock *prev_cbb;
5781 unsigned char* prev_cil_start;
5782 guint32 prev_cil_offset_to_bb_len;
5783 MonoMethod *prev_current_method;
5784 MonoGenericContext *prev_generic_context;
5785 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5787 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional env-var-driven debug filters on what may be inlined */
5789 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5790 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5793 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5794 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5798 if (cfg->verbose_level > 2)
5799 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5801 if (!cmethod->inline_info) {
5802 cfg->stat_inlineable_methods++;
5803 cmethod->inline_info = 1;
5806 /* allocate local variables */
5807 cheader = mono_method_get_header (cmethod);
5809 if (cheader == NULL || mono_loader_get_last_error ()) {
5810 MonoLoaderError *error = mono_loader_get_last_error ();
5813 mono_metadata_free_mh (cheader);
5814 if (inline_always && error)
5815 mono_cfg_set_exception (cfg, error->exception_type);
5817 mono_loader_clear_error ();
5821 /*Must verify before creating locals as it can cause the JIT to assert.*/
5822 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5823 mono_metadata_free_mh (cheader);
5827 /* allocate space to store the return value */
5828 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5829 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* swap in the callee's locals for the duration of the recursive to-ir pass */
5832 prev_locals = cfg->locals;
5833 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5834 for (i = 0; i < cheader->num_locals; ++i)
5835 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5837 /* allocate start and end blocks */
5838 /* This is needed so if the inline is aborted, we can clean up */
5839 NEW_BBLOCK (cfg, sbblock);
5840 sbblock->real_offset = real_offset;
5842 NEW_BBLOCK (cfg, ebblock);
5843 ebblock->block_num = cfg->num_bblocks++;
5844 ebblock->real_offset = real_offset;
/* save all the cfg state that mono_method_to_ir () will clobber */
5846 prev_args = cfg->args;
5847 prev_arg_types = cfg->arg_types;
5848 prev_inlined_method = cfg->inlined_method;
5849 cfg->inlined_method = cmethod;
5850 cfg->ret_var_set = FALSE;
5851 cfg->inline_depth ++;
5852 prev_real_offset = cfg->real_offset;
5853 prev_cbb_hash = cfg->cbb_hash;
5854 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5855 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5856 prev_cil_start = cfg->cil_start;
5857 prev_cbb = cfg->cbb;
5858 prev_current_method = cfg->current_method;
5859 prev_generic_context = cfg->generic_context;
5860 prev_ret_var_set = cfg->ret_var_set;
5862 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5865 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5867 ret_var_set = cfg->ret_var_set;
/* restore the caller's compilation state */
5869 cfg->inlined_method = prev_inlined_method;
5870 cfg->real_offset = prev_real_offset;
5871 cfg->cbb_hash = prev_cbb_hash;
5872 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5873 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5874 cfg->cil_start = prev_cil_start;
5875 cfg->locals = prev_locals;
5876 cfg->args = prev_args;
5877 cfg->arg_types = prev_arg_types;
5878 cfg->current_method = prev_current_method;
5879 cfg->generic_context = prev_generic_context;
5880 cfg->ret_var_set = prev_ret_var_set;
5881 cfg->inline_depth --;
5883 if ((costs >= 0 && costs < 60) || inline_always) {
5884 if (cfg->verbose_level > 2)
5885 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5887 cfg->stat_inlined_methods++;
5889 /* always add some code to avoid block split failures */
5890 MONO_INST_NEW (cfg, ins, OP_NOP);
5891 MONO_ADD_INS (prev_cbb, ins);
5893 prev_cbb->next_bb = sbblock;
5894 link_bblock (cfg, prev_cbb, sbblock);
5897 * Get rid of the begin and end bblocks if possible to aid local
5900 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5902 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5903 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5905 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5906 MonoBasicBlock *prev = ebblock->in_bb [0];
5907 mono_merge_basic_blocks (cfg, prev, ebblock);
5909 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5910 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5911 cfg->cbb = prev_cbb;
5915 * It's possible that the rvar is set in some prev bblock, but not in others.
5921 for (i = 0; i < ebblock->in_count; ++i) {
5922 bb = ebblock->in_bb [i];
5924 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5927 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5937 * If the inlined method contains only a throw, then the ret var is not
5938 * set, so set it to a dummy value.
5941 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5943 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5946 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* inline aborted: reset the exception state and drop the new bblocks */
5949 if (cfg->verbose_level > 2)
5950 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5951 cfg->exception_type = MONO_EXCEPTION_NONE;
5952 mono_loader_clear_error ();
5954 /* This gets rid of the newly added bblocks */
5955 cfg->cbb = prev_cbb;
5957 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5962 * Some of these comments may well be out-of-date.
5963 * Design decisions: we do a single pass over the IL code (and we do bblock
5964 * splitting/merging in the few cases when it's required: a back jump to an IL
5965 * address that was not already seen as bblock starting point).
5966 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5967 * Complex operations are decomposed in simpler ones right away. We need to let the
5968 * arch-specific code peek and poke inside this process somehow (except when the
5969 * optimizations can take advantage of the full semantic info of coarse opcodes).
5970 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5971 * MonoInst->opcode initially is the IL opcode or some simplification of that
5972 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5973 * opcode with value bigger than OP_LAST.
5974 * At this point the IR can be handed over to an interpreter, a dumb code generator
5975 * or to the optimizing code generator that will translate it to SSA form.
5977 * Profiling directed optimizations.
5978 * We may compile by default with few or no optimizations and instrument the code
5979 * or the user may indicate what methods to optimize the most either in a config file
5980 * or through repeated runs where the compiler applies offline the optimizations to
5981 * each method and then decides if it was worth it.
5984 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5985 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5986 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5987 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5988 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5989 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5990 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5991 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5993 /* offset from br.s -> br like opcodes */
5994 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP belongs to basic block BB, i.e. the
 * cil-offset-to-bblock map has no entry for IP (no block starts there) or
 * maps it to BB itself.
 */
5997 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5999 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6001 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END and create basic blocks (via
 * GET_BBLOCK) at every branch target and at the instruction following a
 * branch/switch.  Also marks blocks containing CEE_THROW as out-of-line so
 * they can be moved to cold sections.  Advances over operands according to
 * each opcode's argument kind from the mono_opcodes table.
 */
6005 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6007 unsigned char *ip = start;
6008 unsigned char *target;
6011 MonoBasicBlock *bblock;
6012 const MonoOpcode *opcode;
6015 cli_addr = ip - start;
6016 i = mono_opcode_value ((const guint8 **)&ip, end);
6019 opcode = &mono_opcodes [i];
6020 switch (opcode->argument) {
6021 case MonoInlineNone:
6024 case MonoInlineString:
6025 case MonoInlineType:
6026 case MonoInlineField:
6027 case MonoInlineMethod:
6030 case MonoShortInlineR:
6037 case MonoShortInlineVar:
6038 case MonoShortInlineI:
6041 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
6042 target = start + cli_addr + 2 + (signed char)ip [1];
6043 GET_BBLOCK (cfg, bblock, target);
6046 GET_BBLOCK (cfg, bblock, ip);
6048 case MonoInlineBrTarget:
/* 4-byte signed branch displacement, relative to the next instruction */
6049 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6050 GET_BBLOCK (cfg, bblock, target);
6053 GET_BBLOCK (cfg, bblock, ip);
6055 case MonoInlineSwitch: {
6056 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole switch instruction */
6059 cli_addr += 5 + 4 * n;
6060 target = start + cli_addr;
6061 GET_BBLOCK (cfg, bblock, target);
6063 for (j = 0; j < n; ++j) {
6064 target = start + cli_addr + (gint32)read32 (ip);
6065 GET_BBLOCK (cfg, bblock, target);
6075 g_assert_not_reached ();
6078 if (i == CEE_THROW) {
6079 unsigned char *bb_start = ip - 1;
6081 /* Find the start of the bblock containing the throw */
6083 while ((bb_start >= start) && !bblock) {
6084 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6088 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the method is taken from the wrapper data (then inflated with CONTEXT);
 * otherwise it is loaded from metadata.  Unlike mini_get_method (), open
 * constructed types are allowed in the result.
 */
6098 static inline MonoMethod *
6099 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6103 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6104 method = mono_method_get_wrapper_data (m, token);
6106 method = mono_class_inflate_generic_method (method, context);
6108 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, rejects methods whose class is still an open constructed type
 * (the elided branch presumably clears/normalizes METHOD — confirm).
 */
6114 static inline MonoMethod *
6115 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6117 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6119 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD.  Wrapper methods
 * read the class from wrapper data (then inflate with CONTEXT); normal
 * methods load it from the image.  The class is initialized before return.
 */
6125 static inline MonoClass*
6126 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6130 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6131 klass = mono_method_get_wrapper_data (method, token);
6133 klass = mono_class_inflate_generic_class (klass, context);
6135 klass = mono_class_get_full (method->klass->image, token, context);
6138 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature.  Wrapper methods fetch it from
 * wrapper data and inflate it with CONTEXT (inflation errors are fatal via
 * g_assert); normal methods parse it from metadata.
 */
6142 static inline MonoMethodSignature*
6143 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6145 MonoMethodSignature *fsig;
6147 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6150 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6152 fsig = mono_inflate_generic_signature (fsig, context, &error);
6154 g_assert (mono_error_ok (&error));
6157 fsig = mono_metadata_parse_signature (method->klass->image, token);
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands when CALLER invokes CALLEE.  ECMA-signature
 * failures emit code to throw a SecurityException at the call site; other
 * failures record MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg (without
 * hiding a previously recorded exception).
 */
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* only check declarative security when compiling an inlined callee */
if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
result = mono_declsec_linkdemand (cfg->domain, caller, callee);
if (result == MONO_JIT_SECURITY_OK)
if (result == MONO_JIT_LINKDEMAND_ECMA) {
/* Generate code to throw a SecurityException before the actual call/link */
MonoSecurityManager *secman = mono_security_manager_get_methods ();
NEW_ICONST (cfg, args [0], 4);
NEW_METHODCONST (cfg, args [1], caller);
mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
/* don't hide previous results */
mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily looking it up once into a static) the
 * SecurityManager.ThrowException (1 arg) method used to raise security
 * exceptions from JITted code.
 */
throw_exception (void)
static MonoMethod *method = NULL;
MonoSecurityManager *secman = mono_security_manager_get_methods ();
method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX), i.e. code that
 * unconditionally throws the pre-created exception object EX at runtime.
 */
emit_throw_exception (MonoCompile *cfg, MonoException *ex)
MonoMethod *thrower = throw_exception ();
EMIT_NEW_PCONST (cfg, args [0], ex);
mono_emit_method_call (cfg, thrower, args, NULL);
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
get_original_method (MonoMethod *method)
if (method->wrapper_type == MONO_WRAPPER_NONE)
/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
/* in other cases we need to find the original method */
return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not access FIELD, emit code that throws the returned exception.
 */
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
MonoBasicBlock *bblock, unsigned char *ip)
/* we can't get the coreclr security level on wrappers since they don't have the attributes */
MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not call CALLEE, emit code that throws the returned exception.
 */
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
MonoBasicBlock *bblock, unsigned char *ip)
/* we can't get the coreclr security level on wrappers since they don't have the attributes */
MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
emit_throw_exception (cfg, ex);
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the "dup; ldtoken <field>; call RuntimeHelpers::
 * InitializeArray" IL sequence following a newarr, and if the element type
 * and field data are suitable, return a pointer to the static initializer
 * data (or, for AOT, the field RVA as a GUINT_TO_POINTER) along with the
 * data size and field token.  Big-endian targets punt for multi-byte
 * element types since the blob would need byte-swapping.
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
 * newarr[System.Int32]
 * ldtoken field valuetype ...
 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
guint32 token = read32 (ip + 7);
guint32 field_token = read32 (ip + 2);
guint32 field_index = field_token & 0xffffff;
const char *data_ptr;
MonoMethod *cmethod;
MonoClass *dummy_class;
MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
*out_field_token = field_token;
cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* verify the call target really is RuntimeHelpers.InitializeArray from corlib */
if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
case MONO_TYPE_BOOLEAN:
/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
case MONO_TYPE_CHAR:
/* the initializer blob must be at least as large as the array data */
if (size > mono_type_size (field->type, &dummy_align))
/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
if (!method->klass->image->dynamic) {
field_index = read32 (ip + 2) & 0xffffff;
mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
data_ptr = mono_image_rva_map (method->klass->image, rva);
/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
/* for aot code we do the lookup on load */
if (aot && data_ptr)
return GUINT_TO_POINTER (rva);
/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the cfg with a message naming the
 * offending method and disassembling the IL at IP (or noting an empty
 * body).  The header is queued on headers_to_free for later release.
 */
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
char *method_fname = mono_method_full_name (method, TRUE);
MonoMethodHeader *header = mono_method_get_header (method);
if (header->code_size == 0)
method_code = g_strdup ("method body is empty.");
method_code = mono_disasm_code_one (NULL, method, ip, NULL);
mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
g_free (method_fname);
g_free (method_code);
cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built exception object on the cfg; the pointer is
 * registered as a GC root since it lives in a non-managed structure.
 */
set_exception_object (MonoCompile *cfg, MonoException *exception)
mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into local N.  When the
 * store would be a plain reg-reg move and the source is the last emitted
 * constant-load instruction, the move is elided by retargeting the
 * constant's dreg directly at the local's register.
 */
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
/* Optimize reg-reg moves away */
 * Can't optimize other opcodes, since sp[0] might point to
 * the last ins of a decomposed opcode.
sp [0]->dreg = (cfg)->locals [n]->dreg;
EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Recognize the "ldloca <n>; initobj <type>" pair and replace it with a
 * direct zero-initialization of the local, avoiding the address-taken flag
 * on the local.  Returns the new IP past the consumed instructions
 * (presumably NULL/unchanged when the pattern does not match — the elided
 * tail is not visible here).  SIZE selects the short vs. long ldloca form.
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
local = read16 (ip + 2);
if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
/* From the INITOBJ case */
token = read32 (ip + 2);
klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
CHECK_TYPELOAD (klass);
type = mini_replace_type (&klass->byval_arg);
emit_init_local (cfg, local, type);
/*
 * is_exception_class:
 *
 *   Walk the parent chain of CLASS and return TRUE if it derives from
 * (or is) System.Exception.
 */
is_exception_class (MonoClass *class)
if (class == mono_defaults.exception_class)
class = class->parent;
 * is_jit_optimizer_disabled:
 * Determine whenever M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set.
is_jit_optimizer_disabled (MonoMethod *m)
MonoAssembly *ass = m->klass->image->assembly;
MonoCustomAttrInfo* attrs;
static MonoClass *klass;
gboolean val = FALSE;
/* result is cached per assembly; the barrier orders the value store
 * before the inited flag so racing readers never see a stale value */
if (ass->jit_optimizer_disabled_inited)
return ass->jit_optimizer_disabled;
klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
ass->jit_optimizer_disabled = FALSE;
mono_memory_barrier ();
ass->jit_optimizer_disabled_inited = TRUE;
attrs = mono_custom_attrs_from_assembly (ass);
for (i = 0; i < attrs->num_attrs; ++i) {
MonoCustomAttrEntry *attr = &attrs->attrs [i];
MonoMethodSignature *sig;
if (!attr->ctor || attr->ctor->klass != klass)
/* Decode the attribute. See reflection.c */
len = attr->data_size;
p = (const char*)attr->data;
/* custom attribute blobs start with the 0x0001 prolog */
g_assert (read16 (p) == 0x0001);
// FIXME: Support named parameters
sig = mono_method_signature (attr->ctor);
if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
/* Two boolean arguments */
mono_custom_attrs_free (attrs);
ass->jit_optimizer_disabled = val;
mono_memory_barrier ();
ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Return TRUE if the call from METHOD to CMETHOD (signature FSIG, made
 * via CALL_OPCODE) can be compiled as a tail call.  Starts from an
 * arch-specific or signature-equality check, then vetoes the cases where
 * the callee could observe the caller's (about to be destroyed) frame:
 * byref/pointer/fnptr arguments, valuetype 'this', pinvokes, methods that
 * save an LMF, most wrappers, and any opcode other than plain CEE_CALL.
 */
is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
gboolean supported_tail_call;
#ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
for (i = 0; i < fsig->param_count; ++i) {
if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
/* These can point to the current method's stack */
supported_tail_call = FALSE;
if (fsig->hasthis && cmethod->klass->valuetype)
/* this might point to the current method's stack */
supported_tail_call = FALSE;
if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
supported_tail_call = FALSE;
if (cfg->method->save_lmf)
supported_tail_call = FALSE;
if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
supported_tail_call = FALSE;
if (call_opcode != CEE_CALL)
supported_tail_call = FALSE;
/* Debugging support */
if (supported_tail_call) {
if (!mono_debug_count ())
supported_tail_call = FALSE;
return supported_tail_call;
/* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
 * it to the thread local value based on the tls_offset field. Every other kind of access to
 * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return TRUE if FIELD is the corlib ThreadLocal`1.tlsdata field whose
 * address loads get the special TLS redirection described above.
 */
is_magic_tls_access (MonoClassField *field)
if (strcmp (field->name, "tlsdata"))
if (strcmp (field->parent->name, "ThreadLocal`1"))
return field->parent->image == mono_defaults.corlib;
/* emits the code needed to access a managed tls var (like ThreadStatic)
 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
 * pointer for the current thread.
 * Returns the MonoInst* representing the address of the tls var.
 */
emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
MonoInst *addr;
int static_data_reg, array_reg, dreg;
int offset2_reg, idx_reg;
// inlined access to the tls data
// idx = (offset >> 24) - 1;
// return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
static_data_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
idx_reg = alloc_ireg (cfg);
/* top byte of the offset selects the static_data chunk (1-based) */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* scale the index by sizeof (gpointer) to address the pointer array */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
array_reg = alloc_ireg (cfg);
MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
offset2_reg = alloc_ireg (cfg);
/* low 24 bits are the byte offset inside the chunk */
MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Compute (and cache in *CACHED_TLS_ADDR) the address of the thread-local
 * slot backing TLS_FIELD (ThreadLocal`1.tlsdata).  Loads the tls_offset
 * field from THREAD_LOCAL, obtains the current MonoInternalThread either
 * via the arch intrinsic or a call to CurrentInternalThread_internal, then
 * resolves the address with emit_managed_static_data_access () and stores
 * it into a temp so later accesses in the same method reuse it.
 */
create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
MonoInst *load, *addr, *temp, *store, *thread_ins;
MonoClassField *offset_field;
if (*cached_tls_addr) {
/* already computed for this method: just reload the cached temp */
EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
thread_ins = mono_get_thread_intrinsic (cfg);
offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
MONO_ADD_INS (cfg->cbb, thread_ins);
/* no intrinsic available: fall back to an icall-style method call */
MonoMethod *thread_method;
thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
addr->klass = mono_class_from_mono_type (tls_field->type);
addr->type = STACK_MP;
*cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6609 * mono_method_to_ir:
6611 * Translate the .net IL into linear IR.
6614 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6615 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6616 guint inline_offset, gboolean is_virtual_call)
6619 MonoInst *ins, **sp, **stack_start;
6620 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6621 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6622 MonoMethod *cmethod, *method_definition;
6623 MonoInst **arg_array;
6624 MonoMethodHeader *header;
6626 guint32 token, ins_flag;
6628 MonoClass *constrained_call = NULL;
6629 unsigned char *ip, *end, *target, *err_pos;
6630 MonoMethodSignature *sig;
6631 MonoGenericContext *generic_context = NULL;
6632 MonoGenericContainer *generic_container = NULL;
6633 MonoType **param_types;
6634 int i, n, start_new_bblock, dreg;
6635 int num_calls = 0, inline_costs = 0;
6636 int breakpoint_id = 0;
6638 MonoBoolean security, pinvoke;
6639 MonoSecurityManager* secman = NULL;
6640 MonoDeclSecurityActions actions;
6641 GSList *class_inits = NULL;
6642 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6644 gboolean init_locals, seq_points, skip_dead_blocks;
6645 gboolean disable_inline, sym_seq_points = FALSE;
6646 MonoInst *cached_tls_addr = NULL;
6647 MonoDebugMethodInfo *minfo;
6648 MonoBitSet *seq_point_locs = NULL;
6649 MonoBitSet *seq_point_set_locs = NULL;
6651 disable_inline = is_jit_optimizer_disabled (method);
6653 /* serialization and xdomain stuff may need access to private fields and methods */
6654 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6655 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6656 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6657 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6658 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6659 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6661 dont_verify |= mono_security_smcs_hack_enabled ();
6663 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6664 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6665 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6666 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6667 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6669 image = method->klass->image;
6670 header = mono_method_get_header (method);
6672 MonoLoaderError *error;
6674 if ((error = mono_loader_get_last_error ())) {
6675 mono_cfg_set_exception (cfg, error->exception_type);
6677 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6678 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6680 goto exception_exit;
6682 generic_container = mono_method_get_generic_container (method);
6683 sig = mono_method_signature (method);
6684 num_args = sig->hasthis + sig->param_count;
6685 ip = (unsigned char*)header->code;
6686 cfg->cil_start = ip;
6687 end = ip + header->code_size;
6688 cfg->stat_cil_code_size += header->code_size;
6689 init_locals = header->init_locals;
6691 seq_points = cfg->gen_seq_points && cfg->method == method;
6692 #ifdef PLATFORM_ANDROID
6693 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6696 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6697 /* We could hit a seq point before attaching to the JIT (#8338) */
6701 if (cfg->gen_seq_points && cfg->method == method) {
6702 minfo = mono_debug_lookup_method (method);
6704 int i, n_il_offsets;
6708 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6709 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6710 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6711 sym_seq_points = TRUE;
6712 for (i = 0; i < n_il_offsets; ++i) {
6713 if (il_offsets [i] < header->code_size)
6714 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6716 g_free (il_offsets);
6717 g_free (line_numbers);
6722 * Methods without init_locals set could cause asserts in various passes
6727 method_definition = method;
6728 while (method_definition->is_inflated) {
6729 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6730 method_definition = imethod->declaring;
6733 /* SkipVerification is not allowed if core-clr is enabled */
6734 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6736 dont_verify_stloc = TRUE;
6739 if (sig->is_inflated)
6740 generic_context = mono_method_get_context (method);
6741 else if (generic_container)
6742 generic_context = &generic_container->context;
6743 cfg->generic_context = generic_context;
6745 if (!cfg->generic_sharing_context)
6746 g_assert (!sig->has_type_parameters);
6748 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6749 g_assert (method->is_inflated);
6750 g_assert (mono_method_get_context (method)->method_inst);
6752 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6753 g_assert (sig->generic_param_count);
6755 if (cfg->method == method) {
6756 cfg->real_offset = 0;
6758 cfg->real_offset = inline_offset;
6761 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6762 cfg->cil_offset_to_bb_len = header->code_size;
6764 cfg->current_method = method;
6766 if (cfg->verbose_level > 2)
6767 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6769 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6771 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6772 for (n = 0; n < sig->param_count; ++n)
6773 param_types [n + sig->hasthis] = sig->params [n];
6774 cfg->arg_types = param_types;
6776 dont_inline = g_list_prepend (dont_inline, method);
6777 if (cfg->method == method) {
6779 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6780 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6783 NEW_BBLOCK (cfg, start_bblock);
6784 cfg->bb_entry = start_bblock;
6785 start_bblock->cil_code = NULL;
6786 start_bblock->cil_length = 0;
6787 #if defined(__native_client_codegen__)
6788 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6789 ins->dreg = alloc_dreg (cfg, STACK_I4);
6790 MONO_ADD_INS (start_bblock, ins);
6794 NEW_BBLOCK (cfg, end_bblock);
6795 cfg->bb_exit = end_bblock;
6796 end_bblock->cil_code = NULL;
6797 end_bblock->cil_length = 0;
6798 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6799 g_assert (cfg->num_bblocks == 2);
6801 arg_array = cfg->args;
6803 if (header->num_clauses) {
6804 cfg->spvars = g_hash_table_new (NULL, NULL);
6805 cfg->exvars = g_hash_table_new (NULL, NULL);
6807 /* handle exception clauses */
6808 for (i = 0; i < header->num_clauses; ++i) {
6809 MonoBasicBlock *try_bb;
6810 MonoExceptionClause *clause = &header->clauses [i];
6811 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6812 try_bb->real_offset = clause->try_offset;
6813 try_bb->try_start = TRUE;
6814 try_bb->region = ((i + 1) << 8) | clause->flags;
6815 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6816 tblock->real_offset = clause->handler_offset;
6817 tblock->flags |= BB_EXCEPTION_HANDLER;
6820 * Linking the try block with the EH block hinders inlining as we won't be able to
6821 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6823 if (COMPILE_LLVM (cfg))
6824 link_bblock (cfg, try_bb, tblock);
6826 if (*(ip + clause->handler_offset) == CEE_POP)
6827 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6829 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6830 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6831 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6832 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6833 MONO_ADD_INS (tblock, ins);
6835 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6836 /* finally clauses already have a seq point */
6837 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6838 MONO_ADD_INS (tblock, ins);
6841 /* todo: is a fault block unsafe to optimize? */
6842 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6843 tblock->flags |= BB_EXCEPTION_UNSAFE;
6847 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6849 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6851 /* catch and filter blocks get the exception object on the stack */
6852 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6853 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6854 MonoInst *dummy_use;
6856 /* mostly like handle_stack_args (), but just sets the input args */
6857 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6858 tblock->in_scount = 1;
6859 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6860 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6863 * Add a dummy use for the exvar so its liveness info will be
6867 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6869 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6870 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6871 tblock->flags |= BB_EXCEPTION_HANDLER;
6872 tblock->real_offset = clause->data.filter_offset;
6873 tblock->in_scount = 1;
6874 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6875 /* The filter block shares the exvar with the handler block */
6876 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6877 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6878 MONO_ADD_INS (tblock, ins);
6882 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6883 clause->data.catch_class &&
6884 cfg->generic_sharing_context &&
6885 mono_class_check_context_used (clause->data.catch_class)) {
6887 * In shared generic code with catch
6888 * clauses containing type variables
6889 * the exception handling code has to
6890 * be able to get to the rgctx.
6891 * Therefore we have to make sure that
6892 * the vtable/mrgctx argument (for
6893 * static or generic methods) or the
6894 * "this" argument (for non-static
6895 * methods) are live.
6897 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6898 mini_method_get_context (method)->method_inst ||
6899 method->klass->valuetype) {
6900 mono_get_vtable_var (cfg);
6902 MonoInst *dummy_use;
6904 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6909 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6910 cfg->cbb = start_bblock;
6911 cfg->args = arg_array;
6912 mono_save_args (cfg, sig, inline_args);
6915 /* FIRST CODE BLOCK */
6916 NEW_BBLOCK (cfg, bblock);
6917 bblock->cil_code = ip;
6921 ADD_BBLOCK (cfg, bblock);
6923 if (cfg->method == method) {
6924 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6925 if (breakpoint_id) {
6926 MONO_INST_NEW (cfg, ins, OP_BREAK);
6927 MONO_ADD_INS (bblock, ins);
6931 if (mono_security_cas_enabled ())
6932 secman = mono_security_manager_get_methods ();
6934 security = (secman && mono_security_method_has_declsec (method));
6935 /* at this point having security doesn't mean we have any code to generate */
6936 if (security && (cfg->method == method)) {
6937 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6938 * And we do not want to enter the next section (with allocation) if we
6939 * have nothing to generate */
6940 security = mono_declsec_get_demands (method, &actions);
6943 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6944 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6946 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6947 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6948 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6950 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6951 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6955 mono_custom_attrs_free (custom);
6958 custom = mono_custom_attrs_from_class (wrapped->klass);
6959 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6963 mono_custom_attrs_free (custom);
6966 /* not a P/Invoke after all */
6971 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6972 /* we use a separate basic block for the initialization code */
6973 NEW_BBLOCK (cfg, init_localsbb);
6974 cfg->bb_init = init_localsbb;
6975 init_localsbb->real_offset = cfg->real_offset;
6976 start_bblock->next_bb = init_localsbb;
6977 init_localsbb->next_bb = bblock;
6978 link_bblock (cfg, start_bblock, init_localsbb);
6979 link_bblock (cfg, init_localsbb, bblock);
6981 cfg->cbb = init_localsbb;
6983 start_bblock->next_bb = bblock;
6984 link_bblock (cfg, start_bblock, bblock);
6987 if (cfg->gsharedvt && cfg->method == method) {
6988 MonoGSharedVtMethodInfo *info;
6989 MonoInst *var, *locals_var;
6992 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6993 info->method = cfg->method;
6994 info->count_entries = 16;
6995 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
6996 cfg->gsharedvt_info = info;
6998 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6999 /* prevent it from being register allocated */
7000 //var->flags |= MONO_INST_VOLATILE;
7001 cfg->gsharedvt_info_var = var;
7003 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7004 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7006 /* Allocate locals */
7007 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7008 /* prevent it from being register allocated */
7009 //locals_var->flags |= MONO_INST_VOLATILE;
7010 cfg->gsharedvt_locals_var = locals_var;
7012 dreg = alloc_ireg (cfg);
7013 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7015 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7016 ins->dreg = locals_var->dreg;
7018 MONO_ADD_INS (cfg->cbb, ins);
7019 cfg->gsharedvt_locals_var_ins = ins;
7021 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7024 ins->flags |= MONO_INST_INIT;
7028 /* at this point we know, if security is TRUE, that some code needs to be generated */
7029 if (security && (cfg->method == method)) {
7032 cfg->stat_cas_demand_generation++;
7034 if (actions.demand.blob) {
7035 /* Add code for SecurityAction.Demand */
7036 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7037 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7038 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7039 mono_emit_method_call (cfg, secman->demand, args, NULL);
7041 if (actions.noncasdemand.blob) {
7042 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7043 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7044 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7045 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7046 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7047 mono_emit_method_call (cfg, secman->demand, args, NULL);
7049 if (actions.demandchoice.blob) {
7050 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7051 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7052 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7053 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7054 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7058 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7060 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7063 if (mono_security_core_clr_enabled ()) {
7064 /* check if this is native code, e.g. an icall or a p/invoke */
7065 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7066 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7068 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7069 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7071 /* if this is a native call then it can only be JITted from platform code */
7072 if ((icall || pinvk) && method->klass && method->klass->image) {
7073 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7074 MonoException *ex = icall ? mono_get_exception_security () :
7075 mono_get_exception_method_access ();
7076 emit_throw_exception (cfg, ex);
7083 CHECK_CFG_EXCEPTION;
7085 if (header->code_size == 0)
7088 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7093 if (cfg->method == method)
7094 mono_debug_init_method (cfg, bblock, breakpoint_id);
7096 for (n = 0; n < header->num_locals; ++n) {
7097 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7102 /* We force the vtable variable here for all shared methods
7103 for the possibility that they might show up in a stack
7104 trace where their exact instantiation is needed. */
7105 if (cfg->generic_sharing_context && method == cfg->method) {
7106 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7107 mini_method_get_context (method)->method_inst ||
7108 method->klass->valuetype) {
7109 mono_get_vtable_var (cfg);
7111 /* FIXME: Is there a better way to do this?
7112 We need the variable live for the duration
7113 of the whole method. */
7114 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7118 /* add a check for this != NULL to inlined methods */
7119 if (is_virtual_call) {
7122 NEW_ARGLOAD (cfg, arg_ins, 0);
7123 MONO_ADD_INS (cfg->cbb, arg_ins);
7124 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7127 skip_dead_blocks = !dont_verify;
7128 if (skip_dead_blocks) {
7129 original_bb = bb = mono_basic_block_split (method, &error);
7130 if (!mono_error_ok (&error)) {
7131 mono_error_cleanup (&error);
7137 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7138 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7141 start_new_bblock = 0;
7144 if (cfg->method == method)
7145 cfg->real_offset = ip - header->code;
7147 cfg->real_offset = inline_offset;
7152 if (start_new_bblock) {
7153 bblock->cil_length = ip - bblock->cil_code;
7154 if (start_new_bblock == 2) {
7155 g_assert (ip == tblock->cil_code);
7157 GET_BBLOCK (cfg, tblock, ip);
7159 bblock->next_bb = tblock;
7162 start_new_bblock = 0;
7163 for (i = 0; i < bblock->in_scount; ++i) {
7164 if (cfg->verbose_level > 3)
7165 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7166 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7170 g_slist_free (class_inits);
7173 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7174 link_bblock (cfg, bblock, tblock);
7175 if (sp != stack_start) {
7176 handle_stack_args (cfg, stack_start, sp - stack_start);
7178 CHECK_UNVERIFIABLE (cfg);
7180 bblock->next_bb = tblock;
7183 for (i = 0; i < bblock->in_scount; ++i) {
7184 if (cfg->verbose_level > 3)
7185 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7186 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7189 g_slist_free (class_inits);
7194 if (skip_dead_blocks) {
7195 int ip_offset = ip - header->code;
7197 if (ip_offset == bb->end)
7201 int op_size = mono_opcode_size (ip, end);
7202 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7204 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7206 if (ip_offset + op_size == bb->end) {
7207 MONO_INST_NEW (cfg, ins, OP_NOP);
7208 MONO_ADD_INS (bblock, ins);
7209 start_new_bblock = 1;
7217 * Sequence points are points where the debugger can place a breakpoint.
7218 * Currently, we generate these automatically at points where the IL
7221 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7223 * Make methods interruptable at the beginning, and at the targets of
7224 * backward branches.
7225 * Also, do this at the start of every bblock in methods with clauses too,
7226 * to be able to handle instructions with imprecise control flow like
7228 * Backward branches are handled at the end of method-to-ir ().
7230 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7232 /* Avoid sequence points on empty IL like .volatile */
7233 // FIXME: Enable this
7234 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7235 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7236 if (sp != stack_start)
7237 ins->flags |= MONO_INST_NONEMPTY_STACK;
7238 MONO_ADD_INS (cfg->cbb, ins);
7241 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7244 bblock->real_offset = cfg->real_offset;
7246 if ((cfg->method == method) && cfg->coverage_info) {
7247 guint32 cil_offset = ip - header->code;
7248 cfg->coverage_info->data [cil_offset].cil_code = ip;
7250 /* TODO: Use an increment here */
7251 #if defined(TARGET_X86)
7252 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7253 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7255 MONO_ADD_INS (cfg->cbb, ins);
7257 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7258 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7262 if (cfg->verbose_level > 3)
7263 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7267 if (seq_points && !sym_seq_points && sp != stack_start) {
7269 * The C# compiler uses these nops to notify the JIT that it should
7270 * insert seq points.
7272 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7273 MONO_ADD_INS (cfg->cbb, ins);
7275 if (cfg->keep_cil_nops)
7276 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7278 MONO_INST_NEW (cfg, ins, OP_NOP);
7280 MONO_ADD_INS (bblock, ins);
7283 if (should_insert_brekpoint (cfg->method)) {
7284 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7286 MONO_INST_NEW (cfg, ins, OP_NOP);
7289 MONO_ADD_INS (bblock, ins);
7295 CHECK_STACK_OVF (1);
7296 n = (*ip)-CEE_LDARG_0;
7298 EMIT_NEW_ARGLOAD (cfg, ins, n);
7306 CHECK_STACK_OVF (1);
7307 n = (*ip)-CEE_LDLOC_0;
7309 EMIT_NEW_LOCLOAD (cfg, ins, n);
7318 n = (*ip)-CEE_STLOC_0;
7321 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7323 emit_stloc_ir (cfg, sp, header, n);
7330 CHECK_STACK_OVF (1);
7333 EMIT_NEW_ARGLOAD (cfg, ins, n);
7339 CHECK_STACK_OVF (1);
7342 NEW_ARGLOADA (cfg, ins, n);
7343 MONO_ADD_INS (cfg->cbb, ins);
7353 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7355 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7360 CHECK_STACK_OVF (1);
7363 EMIT_NEW_LOCLOAD (cfg, ins, n);
7367 case CEE_LDLOCA_S: {
7368 unsigned char *tmp_ip;
7370 CHECK_STACK_OVF (1);
7371 CHECK_LOCAL (ip [1]);
7373 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7379 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7388 CHECK_LOCAL (ip [1]);
7389 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7391 emit_stloc_ir (cfg, sp, header, ip [1]);
7396 CHECK_STACK_OVF (1);
7397 EMIT_NEW_PCONST (cfg, ins, NULL);
7398 ins->type = STACK_OBJ;
7403 CHECK_STACK_OVF (1);
7404 EMIT_NEW_ICONST (cfg, ins, -1);
7417 CHECK_STACK_OVF (1);
7418 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7424 CHECK_STACK_OVF (1);
7426 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7432 CHECK_STACK_OVF (1);
7433 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7439 CHECK_STACK_OVF (1);
7440 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7441 ins->type = STACK_I8;
7442 ins->dreg = alloc_dreg (cfg, STACK_I8);
7444 ins->inst_l = (gint64)read64 (ip);
7445 MONO_ADD_INS (bblock, ins);
7451 gboolean use_aotconst = FALSE;
7453 #ifdef TARGET_POWERPC
7454 /* FIXME: Clean this up */
7455 if (cfg->compile_aot)
7456 use_aotconst = TRUE;
7459 /* FIXME: we should really allocate this only late in the compilation process */
7460 f = mono_domain_alloc (cfg->domain, sizeof (float));
7462 CHECK_STACK_OVF (1);
7468 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7470 dreg = alloc_freg (cfg);
7471 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7472 ins->type = STACK_R8;
7474 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7475 ins->type = STACK_R8;
7476 ins->dreg = alloc_dreg (cfg, STACK_R8);
7478 MONO_ADD_INS (bblock, ins);
7488 gboolean use_aotconst = FALSE;
7490 #ifdef TARGET_POWERPC
7491 /* FIXME: Clean this up */
7492 if (cfg->compile_aot)
7493 use_aotconst = TRUE;
7496 /* FIXME: we should really allocate this only late in the compilation process */
7497 d = mono_domain_alloc (cfg->domain, sizeof (double));
7499 CHECK_STACK_OVF (1);
7505 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7507 dreg = alloc_freg (cfg);
7508 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7509 ins->type = STACK_R8;
7511 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7512 ins->type = STACK_R8;
7513 ins->dreg = alloc_dreg (cfg, STACK_R8);
7515 MONO_ADD_INS (bblock, ins);
7524 MonoInst *temp, *store;
7526 CHECK_STACK_OVF (1);
7530 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7531 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7533 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7536 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7549 if (sp [0]->type == STACK_R8)
7550 /* we need to pop the value from the x86 FP stack */
7551 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7557 INLINE_FAILURE ("jmp");
7558 GSHAREDVT_FAILURE (*ip);
7561 if (stack_start != sp)
7563 token = read32 (ip + 1);
7564 /* FIXME: check the signature matches */
7565 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7567 if (!cmethod || mono_loader_get_last_error ())
7570 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7571 GENERIC_SHARING_FAILURE (CEE_JMP);
7573 if (mono_security_cas_enabled ())
7574 CHECK_CFG_EXCEPTION;
7576 if (ARCH_HAVE_OP_TAIL_CALL) {
7577 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7580 /* Handle tail calls similarly to calls */
7581 n = fsig->param_count + fsig->hasthis;
7585 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7586 call->method = cmethod;
7587 call->tail_call = TRUE;
7588 call->signature = mono_method_signature (cmethod);
7589 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7590 call->inst.inst_p0 = cmethod;
7591 for (i = 0; i < n; ++i)
7592 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7594 mono_arch_emit_call (cfg, call);
7595 MONO_ADD_INS (bblock, (MonoInst*)call);
7597 for (i = 0; i < num_args; ++i)
7598 /* Prevent arguments from being optimized away */
7599 arg_array [i]->flags |= MONO_INST_VOLATILE;
7601 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7602 ins = (MonoInst*)call;
7603 ins->inst_p0 = cmethod;
7604 MONO_ADD_INS (bblock, ins);
7608 start_new_bblock = 1;
7613 case CEE_CALLVIRT: {
7614 MonoInst *addr = NULL;
7615 MonoMethodSignature *fsig = NULL;
7617 int virtual = *ip == CEE_CALLVIRT;
7618 int calli = *ip == CEE_CALLI;
7619 gboolean pass_imt_from_rgctx = FALSE;
7620 MonoInst *imt_arg = NULL;
7621 MonoInst *keep_this_alive = NULL;
7622 gboolean pass_vtable = FALSE;
7623 gboolean pass_mrgctx = FALSE;
7624 MonoInst *vtable_arg = NULL;
7625 gboolean check_this = FALSE;
7626 gboolean supported_tail_call = FALSE;
7627 gboolean tail_call = FALSE;
7628 gboolean need_seq_point = FALSE;
7629 guint32 call_opcode = *ip;
7630 gboolean emit_widen = TRUE;
7631 gboolean push_res = TRUE;
7632 gboolean skip_ret = FALSE;
7633 gboolean delegate_invoke = FALSE;
7636 token = read32 (ip + 1);
7641 //GSHAREDVT_FAILURE (*ip);
7646 fsig = mini_get_signature (method, token, generic_context);
7647 n = fsig->param_count + fsig->hasthis;
7649 if (method->dynamic && fsig->pinvoke) {
7653 * This is a call through a function pointer using a pinvoke
7654 * signature. Have to create a wrapper and call that instead.
7655 * FIXME: This is very slow, need to create a wrapper at JIT time
7656 * instead based on the signature.
7658 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7659 EMIT_NEW_PCONST (cfg, args [1], fsig);
7661 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7664 MonoMethod *cil_method;
7666 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7667 cil_method = cmethod;
7669 if (constrained_call) {
7670 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7671 if (cfg->verbose_level > 2)
7672 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7673 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7674 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7675 cfg->generic_sharing_context)) {
7676 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7679 if (cfg->verbose_level > 2)
7680 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7682 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7684 * This is needed since get_method_constrained can't find
7685 * the method in klass representing a type var.
7686 * The type var is guaranteed to be a reference type in this
7689 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7690 g_assert (!cmethod->klass->valuetype);
7692 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7697 if (!cmethod || mono_loader_get_last_error ())
7699 if (!dont_verify && !cfg->skip_visibility) {
7700 MonoMethod *target_method = cil_method;
7701 if (method->is_inflated) {
7702 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7704 if (!mono_method_can_access_method (method_definition, target_method) &&
7705 !mono_method_can_access_method (method, cil_method))
7706 METHOD_ACCESS_FAILURE;
7709 if (mono_security_core_clr_enabled ())
7710 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7712 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7713 /* MS.NET seems to silently convert this to a callvirt */
7718 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7719 * converts to a callvirt.
7721 * tests/bug-515884.il is an example of this behavior
7723 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7724 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7725 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7729 if (!cmethod->klass->inited)
7730 if (!mono_class_init (cmethod->klass))
7731 TYPE_LOAD_ERROR (cmethod->klass);
7733 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7734 mini_class_is_system_array (cmethod->klass)) {
7735 array_rank = cmethod->klass->rank;
7736 fsig = mono_method_signature (cmethod);
7738 fsig = mono_method_signature (cmethod);
7743 if (fsig->pinvoke) {
7744 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7745 check_for_pending_exc, cfg->compile_aot);
7746 fsig = mono_method_signature (wrapper);
7747 } else if (constrained_call) {
7748 fsig = mono_method_signature (cmethod);
7750 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7754 mono_save_token_info (cfg, image, token, cil_method);
7756 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7758 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7759 * foo (bar (), baz ())
7760 * works correctly. MS does this also:
7761 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7762 * The problem with this approach is that the debugger will stop after all calls returning a value,
7763 * even for simple cases, like:
7766 /* Special case a few common successor opcodes */
7767 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7768 need_seq_point = TRUE;
7771 n = fsig->param_count + fsig->hasthis;
7773 /* Don't support calls made using type arguments for now */
7775 if (cfg->gsharedvt) {
7776 if (mini_is_gsharedvt_signature (cfg, fsig))
7777 GSHAREDVT_FAILURE (*ip);
7781 if (mono_security_cas_enabled ()) {
7782 if (check_linkdemand (cfg, method, cmethod))
7783 INLINE_FAILURE ("linkdemand");
7784 CHECK_CFG_EXCEPTION;
7787 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7788 g_assert_not_reached ();
7791 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7794 if (!cfg->generic_sharing_context && cmethod)
7795 g_assert (!mono_method_check_context_used (cmethod));
7799 //g_assert (!virtual || fsig->hasthis);
7803 if (constrained_call) {
7804 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7806 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7808 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7809 /* The 'Own method' case below */
7810 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7811 /* 'The type parameter is instantiated as a reference type' case below. */
7812 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7813 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7814 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7815 MonoInst *args [16];
7818 * This case handles calls to
7819 * - object:ToString()/Equals()/GetHashCode(),
7820 * - System.IComparable<T>:CompareTo()
7821 * - System.IEquatable<T>:Equals ()
7822 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7826 if (mono_method_check_context_used (cmethod))
7827 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7829 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7830 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7832 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7833 if (fsig->hasthis && fsig->param_count) {
7834 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7835 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7836 ins->dreg = alloc_preg (cfg);
7837 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7838 MONO_ADD_INS (cfg->cbb, ins);
7841 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7844 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7846 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7847 addr_reg = ins->dreg;
7848 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7850 EMIT_NEW_ICONST (cfg, args [3], 0);
7851 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7854 EMIT_NEW_ICONST (cfg, args [3], 0);
7855 EMIT_NEW_ICONST (cfg, args [4], 0);
7857 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7860 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7861 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
7862 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
7866 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7867 MONO_ADD_INS (cfg->cbb, add);
7869 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
7870 MONO_ADD_INS (cfg->cbb, ins);
7871 /* ins represents the call result */
7876 GSHAREDVT_FAILURE (*ip);
7880 * We have the `constrained.' prefix opcode.
7882 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7884 * The type parameter is instantiated as a valuetype,
7885 * but that type doesn't override the method we're
7886 * calling, so we need to box `this'.
7888 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7889 ins->klass = constrained_call;
7890 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7891 CHECK_CFG_EXCEPTION;
7892 } else if (!constrained_call->valuetype) {
7893 int dreg = alloc_ireg_ref (cfg);
7896 * The type parameter is instantiated as a reference
7897 * type. We have a managed pointer on the stack, so
7898 * we need to dereference it here.
7900 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7901 ins->type = STACK_OBJ;
7904 if (cmethod->klass->valuetype) {
7907 /* Interface method */
7910 mono_class_setup_vtable (constrained_call);
7911 CHECK_TYPELOAD (constrained_call);
7912 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7914 TYPE_LOAD_ERROR (constrained_call);
7915 slot = mono_method_get_vtable_slot (cmethod);
7917 TYPE_LOAD_ERROR (cmethod->klass);
7918 cmethod = constrained_call->vtable [ioffset + slot];
7920 if (cmethod->klass == mono_defaults.enum_class) {
7921 /* Enum implements some interfaces, so treat this as the first case */
7922 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7923 ins->klass = constrained_call;
7924 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7925 CHECK_CFG_EXCEPTION;
7930 constrained_call = NULL;
7933 if (!calli && check_call_signature (cfg, fsig, sp))
7936 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7937 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7938 delegate_invoke = TRUE;
7941 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7943 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7944 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7952 * If the callee is a shared method, then its static cctor
7953 * might not get called after the call was patched.
7955 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7956 emit_generic_class_init (cfg, cmethod->klass);
7957 CHECK_TYPELOAD (cmethod->klass);
7961 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7963 if (cfg->generic_sharing_context && cmethod) {
7964 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7966 context_used = mini_method_check_context_used (cfg, cmethod);
7968 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7969 /* Generic method interface
7970 calls are resolved via a
7971 helper function and don't
7973 if (!cmethod_context || !cmethod_context->method_inst)
7974 pass_imt_from_rgctx = TRUE;
7978 * If a shared method calls another
7979 * shared method then the caller must
7980 * have a generic sharing context
7981 * because the magic trampoline
7982 * requires it. FIXME: We shouldn't
7983 * have to force the vtable/mrgctx
7984 * variable here. Instead there
7985 * should be a flag in the cfg to
7986 * request a generic sharing context.
7989 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7990 mono_get_vtable_var (cfg);
7995 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7997 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7999 CHECK_TYPELOAD (cmethod->klass);
8000 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8005 g_assert (!vtable_arg);
8007 if (!cfg->compile_aot) {
8009 * emit_get_rgctx_method () calls mono_class_vtable () so check
8010 * for type load errors before.
8012 mono_class_setup_vtable (cmethod->klass);
8013 CHECK_TYPELOAD (cmethod->klass);
8016 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8018 /* !marshalbyref is needed to properly handle generic methods + remoting */
8019 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8020 MONO_METHOD_IS_FINAL (cmethod)) &&
8021 !mono_class_is_marshalbyref (cmethod->klass)) {
8028 if (pass_imt_from_rgctx) {
8029 g_assert (!pass_vtable);
8032 imt_arg = emit_get_rgctx_method (cfg, context_used,
8033 cmethod, MONO_RGCTX_INFO_METHOD);
8037 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8039 /* Calling virtual generic methods */
8040 if (cmethod && virtual &&
8041 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8042 !(MONO_METHOD_IS_FINAL (cmethod) &&
8043 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8044 fsig->generic_param_count &&
8045 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8046 MonoInst *this_temp, *this_arg_temp, *store;
8047 MonoInst *iargs [4];
8048 gboolean use_imt = FALSE;
8050 g_assert (fsig->is_inflated);
8052 /* Prevent inlining of methods that contain indirect calls */
8053 INLINE_FAILURE ("virtual generic call");
8055 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8056 GSHAREDVT_FAILURE (*ip);
8058 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8059 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8064 g_assert (!imt_arg);
8066 g_assert (cmethod->is_inflated);
8067 imt_arg = emit_get_rgctx_method (cfg, context_used,
8068 cmethod, MONO_RGCTX_INFO_METHOD);
8069 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8071 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8072 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8073 MONO_ADD_INS (bblock, store);
8075 /* FIXME: This should be a managed pointer */
8076 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8078 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8079 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8080 cmethod, MONO_RGCTX_INFO_METHOD);
8081 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8082 addr = mono_emit_jit_icall (cfg,
8083 mono_helper_compile_generic_method, iargs);
8085 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8087 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8094 * Implement a workaround for the inherent races involved in locking:
8100 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8101 * try block, the Exit () won't be executed, see:
8102 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8103 * To work around this, we extend such try blocks to include the last x bytes
8104 * of the Monitor.Enter () call.
8106 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8107 MonoBasicBlock *tbb;
8109 GET_BBLOCK (cfg, tbb, ip + 5);
8111 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8112 * from Monitor.Enter like ArgumentNullException.
8114 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8115 /* Mark this bblock as needing to be extended */
8116 tbb->extend_try_block = TRUE;
8120 /* Conversion to a JIT intrinsic */
8121 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8123 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8124 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8131 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8132 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8133 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8134 !g_list_find (dont_inline, cmethod)) {
8136 gboolean always = FALSE;
8138 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8139 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8140 /* Prevent inlining of methods that call wrappers */
8141 INLINE_FAILURE ("wrapper call");
8142 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8146 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8148 cfg->real_offset += 5;
8151 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8152 /* *sp is already set by inline_method */
8157 inline_costs += costs;
8163 /* Tail recursion elimination */
8164 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8165 gboolean has_vtargs = FALSE;
8168 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8169 INLINE_FAILURE ("tail call");
8171 /* keep it simple */
8172 for (i = fsig->param_count - 1; i >= 0; i--) {
8173 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8178 for (i = 0; i < n; ++i)
8179 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8180 MONO_INST_NEW (cfg, ins, OP_BR);
8181 MONO_ADD_INS (bblock, ins);
8182 tblock = start_bblock->out_bb [0];
8183 link_bblock (cfg, bblock, tblock);
8184 ins->inst_target_bb = tblock;
8185 start_new_bblock = 1;
8187 /* skip the CEE_RET, too */
8188 if (ip_in_bb (cfg, bblock, ip + 5))
8195 inline_costs += 10 * num_calls++;
8198 * Making generic calls out of gsharedvt methods.
8200 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8201 MonoRgctxInfoType info_type;
8204 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8205 //GSHAREDVT_FAILURE (*ip);
8206 // disable for possible remoting calls
8207 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8208 GSHAREDVT_FAILURE (*ip);
8209 if (fsig->generic_param_count) {
8210 /* virtual generic call */
8211 g_assert (mono_use_imt);
8212 g_assert (!imt_arg);
8213 /* Same as the virtual generic case above */
8214 imt_arg = emit_get_rgctx_method (cfg, context_used,
8215 cmethod, MONO_RGCTX_INFO_METHOD);
8216 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8221 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8222 /* test_0_multi_dim_arrays () in gshared.cs */
8223 GSHAREDVT_FAILURE (*ip);
8225 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8226 keep_this_alive = sp [0];
8228 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8229 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8231 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8232 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8234 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8236 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8238 * We pass the address to the gsharedvt trampoline in the rgctx reg
8240 MonoInst *callee = addr;
8242 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8244 GSHAREDVT_FAILURE (*ip);
8246 addr = emit_get_rgctx_sig (cfg, context_used,
8247 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8248 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8252 /* Generic sharing */
8253 /* FIXME: only do this for generic methods if
8254 they are not shared! */
8255 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8256 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8257 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8258 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8259 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8260 INLINE_FAILURE ("gshared");
8262 g_assert (cfg->generic_sharing_context && cmethod);
8266 * We are compiling a call to a
8267 * generic method from shared code,
8268 * which means that we have to look up
8269 * the method in the rgctx and do an
8273 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8275 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8276 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8280 /* Indirect calls */
8282 if (call_opcode == CEE_CALL)
8283 g_assert (context_used);
8284 else if (call_opcode == CEE_CALLI)
8285 g_assert (!vtable_arg);
8287 /* FIXME: what the hell is this??? */
8288 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8289 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8291 /* Prevent inlining of methods with indirect calls */
8292 INLINE_FAILURE ("indirect call");
8294 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8299 * Instead of emitting an indirect call, emit a direct call
8300 * with the contents of the aotconst as the patch info.
8302 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8303 info_type = addr->inst_c1;
8304 info_data = addr->inst_p0;
8306 info_type = addr->inst_right->inst_c1;
8307 info_data = addr->inst_right->inst_left;
8310 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8311 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8316 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8324 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8325 MonoInst *val = sp [fsig->param_count];
8327 if (val->type == STACK_OBJ) {
8328 MonoInst *iargs [2];
8333 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8336 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8337 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8338 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8339 emit_write_barrier (cfg, addr, val);
8340 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8341 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8343 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8344 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8345 if (!cmethod->klass->element_class->valuetype && !readonly)
8346 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8347 CHECK_TYPELOAD (cmethod->klass);
8350 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8353 g_assert_not_reached ();
8360 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8364 /* Tail prefix / tail call optimization */
8366 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8367 /* FIXME: runtime generic context pointer for jumps? */
8368 /* FIXME: handle this for generic sharing eventually */
8369 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8370 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8371 supported_tail_call = TRUE;
8373 if (supported_tail_call) {
8376 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8377 INLINE_FAILURE ("tail call");
8379 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8381 if (ARCH_HAVE_OP_TAIL_CALL) {
8382 /* Handle tail calls similarly to normal calls */
8385 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8386 call->tail_call = TRUE;
8387 call->method = cmethod;
8388 call->signature = mono_method_signature (cmethod);
8391 * We implement tail calls by storing the actual arguments into the
8392 * argument variables, then emitting a CEE_JMP.
8394 for (i = 0; i < n; ++i) {
8395 /* Prevent argument from being register allocated */
8396 arg_array [i]->flags |= MONO_INST_VOLATILE;
8397 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8399 ins = (MonoInst*)call;
8400 ins->inst_p0 = cmethod;
8401 ins->inst_p1 = arg_array [0];
8402 MONO_ADD_INS (bblock, ins);
8403 link_bblock (cfg, bblock, end_bblock);
8404 start_new_bblock = 1;
8406 // FIXME: Eliminate unreachable epilogs
8409 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8410 * only reachable from this call.
8412 GET_BBLOCK (cfg, tblock, ip + 5);
8413 if (tblock == bblock || tblock->in_count == 0)
8422 * Synchronized wrappers.
8423 * It's hard to determine where to replace a method with its synchronized
8424 * wrapper without causing an infinite recursion. The current solution is
8425 * to add the synchronized wrapper in the trampolines, and to
8426 * change the called method to a dummy wrapper, and resolve that wrapper
8427 * to the real method in mono_jit_compile_method ().
8429 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8430 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8431 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8432 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8436 INLINE_FAILURE ("call");
8437 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8438 imt_arg, vtable_arg);
8441 link_bblock (cfg, bblock, end_bblock);
8442 start_new_bblock = 1;
8444 // FIXME: Eliminate unreachable epilogs
8447 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8448 * only reachable from this call.
8450 GET_BBLOCK (cfg, tblock, ip + 5);
8451 if (tblock == bblock || tblock->in_count == 0)
8458 /* End of call, INS should contain the result of the call, if any */
8460 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8463 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8468 if (keep_this_alive) {
8469 MonoInst *dummy_use;
8471 /* See mono_emit_method_call_full () */
8472 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8475 CHECK_CFG_EXCEPTION;
8479 g_assert (*ip == CEE_RET);
8483 constrained_call = NULL;
8485 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8489 if (cfg->method != method) {
8490 /* return from inlined method */
8492 * If in_count == 0, that means the ret is unreachable due to
8493 * being preceded by a throw. In that case, inline_method () will
8494 * handle setting the return value
8495 * (test case: test_0_inline_throw ()).
8497 if (return_var && cfg->cbb->in_count) {
8498 MonoType *ret_type = mono_method_signature (method)->ret;
8504 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8507 //g_assert (returnvar != -1);
8508 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8509 cfg->ret_var_set = TRUE;
8512 if (cfg->lmf_var && cfg->cbb->in_count)
8516 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8518 if (seq_points && !sym_seq_points) {
8520 * Place a seq point here too even though the IL stack is not
8521 * empty, so a step over on
8524 * will work correctly.
8526 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8527 MONO_ADD_INS (cfg->cbb, ins);
8530 g_assert (!return_var);
8534 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8537 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8540 if (!cfg->vret_addr) {
8543 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8545 EMIT_NEW_RETLOADA (cfg, ret_addr);
8547 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8548 ins->klass = mono_class_from_mono_type (ret_type);
8551 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8552 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8553 MonoInst *iargs [1];
8557 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8558 mono_arch_emit_setret (cfg, method, conv);
8560 mono_arch_emit_setret (cfg, method, *sp);
8563 mono_arch_emit_setret (cfg, method, *sp);
8568 if (sp != stack_start)
8570 MONO_INST_NEW (cfg, ins, OP_BR);
8572 ins->inst_target_bb = end_bblock;
8573 MONO_ADD_INS (bblock, ins);
8574 link_bblock (cfg, bblock, end_bblock);
8575 start_new_bblock = 1;
8579 MONO_INST_NEW (cfg, ins, OP_BR);
8581 target = ip + 1 + (signed char)(*ip);
8583 GET_BBLOCK (cfg, tblock, target);
8584 link_bblock (cfg, bblock, tblock);
8585 ins->inst_target_bb = tblock;
8586 if (sp != stack_start) {
8587 handle_stack_args (cfg, stack_start, sp - stack_start);
8589 CHECK_UNVERIFIABLE (cfg);
8591 MONO_ADD_INS (bblock, ins);
8592 start_new_bblock = 1;
8593 inline_costs += BRANCH_COST;
8607 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8609 target = ip + 1 + *(signed char*)ip;
8615 inline_costs += BRANCH_COST;
8619 MONO_INST_NEW (cfg, ins, OP_BR);
8622 target = ip + 4 + (gint32)read32(ip);
8624 GET_BBLOCK (cfg, tblock, target);
8625 link_bblock (cfg, bblock, tblock);
8626 ins->inst_target_bb = tblock;
8627 if (sp != stack_start) {
8628 handle_stack_args (cfg, stack_start, sp - stack_start);
8630 CHECK_UNVERIFIABLE (cfg);
8633 MONO_ADD_INS (bblock, ins);
8635 start_new_bblock = 1;
8636 inline_costs += BRANCH_COST;
8643 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8644 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8645 guint32 opsize = is_short ? 1 : 4;
8647 CHECK_OPSIZE (opsize);
8649 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8652 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8657 GET_BBLOCK (cfg, tblock, target);
8658 link_bblock (cfg, bblock, tblock);
8659 GET_BBLOCK (cfg, tblock, ip);
8660 link_bblock (cfg, bblock, tblock);
8662 if (sp != stack_start) {
8663 handle_stack_args (cfg, stack_start, sp - stack_start);
8664 CHECK_UNVERIFIABLE (cfg);
8667 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8668 cmp->sreg1 = sp [0]->dreg;
8669 type_from_op (cmp, sp [0], NULL);
8672 #if SIZEOF_REGISTER == 4
8673 if (cmp->opcode == OP_LCOMPARE_IMM) {
8674 /* Convert it to OP_LCOMPARE */
8675 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8676 ins->type = STACK_I8;
8677 ins->dreg = alloc_dreg (cfg, STACK_I8);
8679 MONO_ADD_INS (bblock, ins);
8680 cmp->opcode = OP_LCOMPARE;
8681 cmp->sreg2 = ins->dreg;
8684 MONO_ADD_INS (bblock, cmp);
8686 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8687 type_from_op (ins, sp [0], NULL);
8688 MONO_ADD_INS (bblock, ins);
8689 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8690 GET_BBLOCK (cfg, tblock, target);
8691 ins->inst_true_bb = tblock;
8692 GET_BBLOCK (cfg, tblock, ip);
8693 ins->inst_false_bb = tblock;
8694 start_new_bblock = 2;
8697 inline_costs += BRANCH_COST;
8712 MONO_INST_NEW (cfg, ins, *ip);
8714 target = ip + 4 + (gint32)read32(ip);
8720 inline_costs += BRANCH_COST;
8724 MonoBasicBlock **targets;
8725 MonoBasicBlock *default_bblock;
8726 MonoJumpInfoBBTable *table;
8727 int offset_reg = alloc_preg (cfg);
8728 int target_reg = alloc_preg (cfg);
8729 int table_reg = alloc_preg (cfg);
8730 int sum_reg = alloc_preg (cfg);
8731 gboolean use_op_switch;
8735 n = read32 (ip + 1);
8738 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8742 CHECK_OPSIZE (n * sizeof (guint32));
8743 target = ip + n * sizeof (guint32);
8745 GET_BBLOCK (cfg, default_bblock, target);
8746 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8748 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8749 for (i = 0; i < n; ++i) {
8750 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8751 targets [i] = tblock;
8752 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8756 if (sp != stack_start) {
8758 * Link the current bb with the targets as well, so handle_stack_args
8759 * will set their in_stack correctly.
8761 link_bblock (cfg, bblock, default_bblock);
8762 for (i = 0; i < n; ++i)
8763 link_bblock (cfg, bblock, targets [i]);
8765 handle_stack_args (cfg, stack_start, sp - stack_start);
8767 CHECK_UNVERIFIABLE (cfg);
8770 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8774 for (i = 0; i < n; ++i)
8775 link_bblock (cfg, bblock, targets [i]);
8777 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8778 table->table = targets;
8779 table->table_size = n;
8781 use_op_switch = FALSE;
8783 /* ARM implements SWITCH statements differently */
8784 /* FIXME: Make it use the generic implementation */
8785 if (!cfg->compile_aot)
8786 use_op_switch = TRUE;
8789 if (COMPILE_LLVM (cfg))
8790 use_op_switch = TRUE;
8792 cfg->cbb->has_jump_table = 1;
8794 if (use_op_switch) {
8795 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8796 ins->sreg1 = src1->dreg;
8797 ins->inst_p0 = table;
8798 ins->inst_many_bb = targets;
8799 ins->klass = GUINT_TO_POINTER (n);
8800 MONO_ADD_INS (cfg->cbb, ins);
8802 if (sizeof (gpointer) == 8)
8803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8805 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8807 #if SIZEOF_REGISTER == 8
8808 /* The upper word might not be zero, and we add it to a 64 bit address later */
8809 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8812 if (cfg->compile_aot) {
8813 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8815 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8816 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8817 ins->inst_p0 = table;
8818 ins->dreg = table_reg;
8819 MONO_ADD_INS (cfg->cbb, ins);
8822 /* FIXME: Use load_memindex */
8823 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8824 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8825 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8827 start_new_bblock = 1;
8828 inline_costs += (BRANCH_COST * 2);
8848 dreg = alloc_freg (cfg);
8851 dreg = alloc_lreg (cfg);
8854 dreg = alloc_ireg_ref (cfg);
8857 dreg = alloc_preg (cfg);
8860 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8861 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8862 ins->flags |= ins_flag;
8864 MONO_ADD_INS (bblock, ins);
8866 if (ins->flags & MONO_INST_VOLATILE) {
8867 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8868 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8869 emit_memory_barrier (cfg, FullBarrier);
8884 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8885 ins->flags |= ins_flag;
8888 if (ins->flags & MONO_INST_VOLATILE) {
8889 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8890 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore*/
8891 emit_memory_barrier (cfg, FullBarrier);
8894 MONO_ADD_INS (bblock, ins);
8896 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8897 emit_write_barrier (cfg, sp [0], sp [1]);
8906 MONO_INST_NEW (cfg, ins, (*ip));
8908 ins->sreg1 = sp [0]->dreg;
8909 ins->sreg2 = sp [1]->dreg;
8910 type_from_op (ins, sp [0], sp [1]);
8912 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8914 /* Use the immediate opcodes if possible */
8915 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8916 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8917 if (imm_opcode != -1) {
8918 ins->opcode = imm_opcode;
8919 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8922 sp [1]->opcode = OP_NOP;
8926 MONO_ADD_INS ((cfg)->cbb, (ins));
8928 *sp++ = mono_decompose_opcode (cfg, ins);
8945 MONO_INST_NEW (cfg, ins, (*ip));
8947 ins->sreg1 = sp [0]->dreg;
8948 ins->sreg2 = sp [1]->dreg;
8949 type_from_op (ins, sp [0], sp [1]);
8951 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8952 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8954 /* FIXME: Pass opcode to is_inst_imm */
8956 /* Use the immediate opcodes if possible */
8957 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8960 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8961 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8962 /* Keep emulated opcodes which are optimized away later */
8963 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8964 imm_opcode = mono_op_to_op_imm (ins->opcode);
8967 if (imm_opcode != -1) {
8968 ins->opcode = imm_opcode;
8969 if (sp [1]->opcode == OP_I8CONST) {
8970 #if SIZEOF_REGISTER == 8
8971 ins->inst_imm = sp [1]->inst_l;
8973 ins->inst_ls_word = sp [1]->inst_ls_word;
8974 ins->inst_ms_word = sp [1]->inst_ms_word;
8978 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8981 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8982 if (sp [1]->next == NULL)
8983 sp [1]->opcode = OP_NOP;
8986 MONO_ADD_INS ((cfg)->cbb, (ins));
8988 *sp++ = mono_decompose_opcode (cfg, ins);
9001 case CEE_CONV_OVF_I8:
9002 case CEE_CONV_OVF_U8:
9006 /* Special case this earlier so we have long constants in the IR */
9007 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9008 int data = sp [-1]->inst_c0;
9009 sp [-1]->opcode = OP_I8CONST;
9010 sp [-1]->type = STACK_I8;
9011 #if SIZEOF_REGISTER == 8
9012 if ((*ip) == CEE_CONV_U8)
9013 sp [-1]->inst_c0 = (guint32)data;
9015 sp [-1]->inst_c0 = data;
9017 sp [-1]->inst_ls_word = data;
9018 if ((*ip) == CEE_CONV_U8)
9019 sp [-1]->inst_ms_word = 0;
9021 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9023 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9030 case CEE_CONV_OVF_I4:
9031 case CEE_CONV_OVF_I1:
9032 case CEE_CONV_OVF_I2:
9033 case CEE_CONV_OVF_I:
9034 case CEE_CONV_OVF_U:
9037 if (sp [-1]->type == STACK_R8) {
9038 ADD_UNOP (CEE_CONV_OVF_I8);
9045 case CEE_CONV_OVF_U1:
9046 case CEE_CONV_OVF_U2:
9047 case CEE_CONV_OVF_U4:
9050 if (sp [-1]->type == STACK_R8) {
9051 ADD_UNOP (CEE_CONV_OVF_U8);
9058 case CEE_CONV_OVF_I1_UN:
9059 case CEE_CONV_OVF_I2_UN:
9060 case CEE_CONV_OVF_I4_UN:
9061 case CEE_CONV_OVF_I8_UN:
9062 case CEE_CONV_OVF_U1_UN:
9063 case CEE_CONV_OVF_U2_UN:
9064 case CEE_CONV_OVF_U4_UN:
9065 case CEE_CONV_OVF_U8_UN:
9066 case CEE_CONV_OVF_I_UN:
9067 case CEE_CONV_OVF_U_UN:
9074 CHECK_CFG_EXCEPTION;
9078 case CEE_ADD_OVF_UN:
9080 case CEE_MUL_OVF_UN:
9082 case CEE_SUB_OVF_UN:
9088 GSHAREDVT_FAILURE (*ip);
9091 token = read32 (ip + 1);
9092 klass = mini_get_class (method, token, generic_context);
9093 CHECK_TYPELOAD (klass);
9095 if (generic_class_is_reference_type (cfg, klass)) {
9096 MonoInst *store, *load;
9097 int dreg = alloc_ireg_ref (cfg);
9099 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9100 load->flags |= ins_flag;
9101 MONO_ADD_INS (cfg->cbb, load);
9103 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9104 store->flags |= ins_flag;
9105 MONO_ADD_INS (cfg->cbb, store);
9107 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9108 emit_write_barrier (cfg, sp [0], sp [1]);
9110 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9122 token = read32 (ip + 1);
9123 klass = mini_get_class (method, token, generic_context);
9124 CHECK_TYPELOAD (klass);
9126 /* Optimize the common ldobj+stloc combination */
9136 loc_index = ip [5] - CEE_STLOC_0;
9143 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9144 CHECK_LOCAL (loc_index);
9146 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9147 ins->dreg = cfg->locals [loc_index]->dreg;
9153 /* Optimize the ldobj+stobj combination */
9154 /* The reference case ends up being a load+store anyway */
9155 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9160 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9167 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9176 CHECK_STACK_OVF (1);
9178 n = read32 (ip + 1);
9180 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9181 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9182 ins->type = STACK_OBJ;
9185 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9186 MonoInst *iargs [1];
9188 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9189 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9191 if (cfg->opt & MONO_OPT_SHARED) {
9192 MonoInst *iargs [3];
9194 if (cfg->compile_aot) {
9195 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9197 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9198 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9199 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9200 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9201 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9203 if (bblock->out_of_line) {
9204 MonoInst *iargs [2];
9206 if (image == mono_defaults.corlib) {
9208 * Avoid relocations in AOT and save some space by using a
9209 * version of helper_ldstr specialized to mscorlib.
9211 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9212 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9214 /* Avoid creating the string object */
9215 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9216 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9217 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9221 if (cfg->compile_aot) {
9222 NEW_LDSTRCONST (cfg, ins, image, n);
9224 MONO_ADD_INS (bblock, ins);
9227 NEW_PCONST (cfg, ins, NULL);
9228 ins->type = STACK_OBJ;
9229 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9231 OUT_OF_MEMORY_FAILURE;
9234 MONO_ADD_INS (bblock, ins);
9243 MonoInst *iargs [2];
9244 MonoMethodSignature *fsig;
9247 MonoInst *vtable_arg = NULL;
9250 token = read32 (ip + 1);
9251 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9252 if (!cmethod || mono_loader_get_last_error ())
9254 fsig = mono_method_get_signature (cmethod, image, token);
9258 mono_save_token_info (cfg, image, token, cmethod);
9260 if (!mono_class_init (cmethod->klass))
9261 TYPE_LOAD_ERROR (cmethod->klass);
9263 context_used = mini_method_check_context_used (cfg, cmethod);
9265 if (mono_security_cas_enabled ()) {
9266 if (check_linkdemand (cfg, method, cmethod))
9267 INLINE_FAILURE ("linkdemand");
9268 CHECK_CFG_EXCEPTION;
9269 } else if (mono_security_core_clr_enabled ()) {
9270 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9273 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9274 emit_generic_class_init (cfg, cmethod->klass);
9275 CHECK_TYPELOAD (cmethod->klass);
9279 if (cfg->gsharedvt) {
9280 if (mini_is_gsharedvt_variable_signature (sig))
9281 GSHAREDVT_FAILURE (*ip);
9285 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9286 mono_method_is_generic_sharable (cmethod, TRUE)) {
9287 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9288 mono_class_vtable (cfg->domain, cmethod->klass);
9289 CHECK_TYPELOAD (cmethod->klass);
9291 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9292 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9295 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9296 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9298 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9300 CHECK_TYPELOAD (cmethod->klass);
9301 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9306 n = fsig->param_count;
9310 * Generate smaller code for the common newobj <exception> instruction in
9311 * argument checking code.
9313 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9314 is_exception_class (cmethod->klass) && n <= 2 &&
9315 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9316 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9317 MonoInst *iargs [3];
9319 g_assert (!vtable_arg);
9323 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9326 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9330 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9335 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9338 g_assert_not_reached ();
9346 /* move the args to allow room for 'this' in the first position */
9352 /* check_call_signature () requires sp[0] to be set */
9353 this_ins.type = STACK_OBJ;
9355 if (check_call_signature (cfg, fsig, sp))
9360 if (mini_class_is_system_array (cmethod->klass)) {
9361 g_assert (!vtable_arg);
9363 *sp = emit_get_rgctx_method (cfg, context_used,
9364 cmethod, MONO_RGCTX_INFO_METHOD);
9366 /* Avoid varargs in the common case */
9367 if (fsig->param_count == 1)
9368 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9369 else if (fsig->param_count == 2)
9370 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9371 else if (fsig->param_count == 3)
9372 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9373 else if (fsig->param_count == 4)
9374 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9376 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9377 } else if (cmethod->string_ctor) {
9378 g_assert (!context_used);
9379 g_assert (!vtable_arg);
9380 /* we simply pass a null pointer */
9381 EMIT_NEW_PCONST (cfg, *sp, NULL);
9382 /* now call the string ctor */
9383 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9385 MonoInst* callvirt_this_arg = NULL;
9387 if (cmethod->klass->valuetype) {
9388 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9389 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9390 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9395 * The code generated by mini_emit_virtual_call () expects
9396 * iargs [0] to be a boxed instance, but luckily the vcall
9397 * will be transformed into a normal call there.
9399 } else if (context_used) {
9400 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9403 MonoVTable *vtable = NULL;
9405 if (!cfg->compile_aot)
9406 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9407 CHECK_TYPELOAD (cmethod->klass);
9410 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9411 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9412 * As a workaround, we call class cctors before allocating objects.
9414 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9415 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9416 if (cfg->verbose_level > 2)
9417 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9418 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9421 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9424 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9427 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9429 /* Now call the actual ctor */
9430 /* Avoid virtual calls to ctors if possible */
9431 if (mono_class_is_marshalbyref (cmethod->klass))
9432 callvirt_this_arg = sp [0];
9435 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9436 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9437 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9442 CHECK_CFG_EXCEPTION;
9443 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9444 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9445 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9446 !g_list_find (dont_inline, cmethod)) {
9449 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9450 cfg->real_offset += 5;
9453 inline_costs += costs - 5;
9455 INLINE_FAILURE ("inline failure");
9456 // FIXME-VT: Clean this up
9457 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9458 GSHAREDVT_FAILURE(*ip);
9459 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9461 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9464 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9465 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9466 } else if (context_used &&
9467 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9468 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9469 MonoInst *cmethod_addr;
9471 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9472 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9474 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9476 INLINE_FAILURE ("ctor call");
9477 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9478 callvirt_this_arg, NULL, vtable_arg);
9482 if (alloc == NULL) {
9484 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9485 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9499 token = read32 (ip + 1);
9500 klass = mini_get_class (method, token, generic_context);
9501 CHECK_TYPELOAD (klass);
9502 if (sp [0]->type != STACK_OBJ)
9505 context_used = mini_class_check_context_used (cfg, klass);
9507 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9508 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9515 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9518 if (cfg->compile_aot)
9519 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9521 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9523 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9525 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9526 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9527 reset_cast_details (cfg);
9530 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9531 MonoMethod *mono_castclass;
9532 MonoInst *iargs [1];
9535 mono_castclass = mono_marshal_get_castclass (klass);
9538 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9539 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9540 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9541 reset_cast_details (cfg);
9542 CHECK_CFG_EXCEPTION;
9543 g_assert (costs > 0);
9546 cfg->real_offset += 5;
9551 inline_costs += costs;
9554 ins = handle_castclass (cfg, klass, *sp, context_used);
9555 CHECK_CFG_EXCEPTION;
9565 token = read32 (ip + 1);
9566 klass = mini_get_class (method, token, generic_context);
9567 CHECK_TYPELOAD (klass);
9568 if (sp [0]->type != STACK_OBJ)
9571 context_used = mini_class_check_context_used (cfg, klass);
9573 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9574 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9581 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9584 if (cfg->compile_aot)
9585 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9587 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9589 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9592 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9593 MonoMethod *mono_isinst;
9594 MonoInst *iargs [1];
9597 mono_isinst = mono_marshal_get_isinst (klass);
9600 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9601 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9602 CHECK_CFG_EXCEPTION;
9603 g_assert (costs > 0);
9606 cfg->real_offset += 5;
9611 inline_costs += costs;
9614 ins = handle_isinst (cfg, klass, *sp, context_used);
9615 CHECK_CFG_EXCEPTION;
9622 case CEE_UNBOX_ANY: {
9626 token = read32 (ip + 1);
9627 klass = mini_get_class (method, token, generic_context);
9628 CHECK_TYPELOAD (klass);
9630 mono_save_token_info (cfg, image, token, klass);
9632 context_used = mini_class_check_context_used (cfg, klass);
9634 if (mini_is_gsharedvt_klass (cfg, klass)) {
9635 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9643 if (generic_class_is_reference_type (cfg, klass)) {
9644 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9645 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9646 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9653 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9656 /*FIXME AOT support*/
9657 if (cfg->compile_aot)
9658 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9660 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9662 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9663 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9666 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9667 MonoMethod *mono_castclass;
9668 MonoInst *iargs [1];
9671 mono_castclass = mono_marshal_get_castclass (klass);
9674 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9675 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9676 CHECK_CFG_EXCEPTION;
9677 g_assert (costs > 0);
9680 cfg->real_offset += 5;
9684 inline_costs += costs;
9686 ins = handle_castclass (cfg, klass, *sp, context_used);
9687 CHECK_CFG_EXCEPTION;
9695 if (mono_class_is_nullable (klass)) {
9696 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9703 ins = handle_unbox (cfg, klass, sp, context_used);
9709 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9722 token = read32 (ip + 1);
9723 klass = mini_get_class (method, token, generic_context);
9724 CHECK_TYPELOAD (klass);
9726 mono_save_token_info (cfg, image, token, klass);
9728 context_used = mini_class_check_context_used (cfg, klass);
9730 if (generic_class_is_reference_type (cfg, klass)) {
9736 if (klass == mono_defaults.void_class)
9738 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9740 /* frequent check in generic code: box (struct), brtrue */
9742 // FIXME: LLVM can't handle the inconsistent bb linking
9743 if (!mono_class_is_nullable (klass) &&
9744 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9745 (ip [5] == CEE_BRTRUE ||
9746 ip [5] == CEE_BRTRUE_S ||
9747 ip [5] == CEE_BRFALSE ||
9748 ip [5] == CEE_BRFALSE_S)) {
9749 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9751 MonoBasicBlock *true_bb, *false_bb;
9755 if (cfg->verbose_level > 3) {
9756 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9757 printf ("<box+brtrue opt>\n");
9765 target = ip + 1 + (signed char)(*ip);
9772 target = ip + 4 + (gint)(read32 (ip));
9776 g_assert_not_reached ();
9780 * We need to link both bblocks, since it is needed for handling stack
9781 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9782 * Branching to only one of them would lead to inconsistencies, so
9783 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9785 GET_BBLOCK (cfg, true_bb, target);
9786 GET_BBLOCK (cfg, false_bb, ip);
9788 mono_link_bblock (cfg, cfg->cbb, true_bb);
9789 mono_link_bblock (cfg, cfg->cbb, false_bb);
9791 if (sp != stack_start) {
9792 handle_stack_args (cfg, stack_start, sp - stack_start);
9794 CHECK_UNVERIFIABLE (cfg);
9797 if (COMPILE_LLVM (cfg)) {
9798 dreg = alloc_ireg (cfg);
9799 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9800 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9802 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9804 /* The JIT can't eliminate the iconst+compare */
9805 MONO_INST_NEW (cfg, ins, OP_BR);
9806 ins->inst_target_bb = is_true ? true_bb : false_bb;
9807 MONO_ADD_INS (cfg->cbb, ins);
9810 start_new_bblock = 1;
9814 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9816 CHECK_CFG_EXCEPTION;
9825 token = read32 (ip + 1);
9826 klass = mini_get_class (method, token, generic_context);
9827 CHECK_TYPELOAD (klass);
9829 mono_save_token_info (cfg, image, token, klass);
9831 context_used = mini_class_check_context_used (cfg, klass);
9833 if (mono_class_is_nullable (klass)) {
9836 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9837 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9841 ins = handle_unbox (cfg, klass, sp, context_used);
9854 MonoClassField *field;
9855 #ifndef DISABLE_REMOTING
9859 gboolean is_instance;
9861 gpointer addr = NULL;
9862 gboolean is_special_static;
9864 MonoInst *store_val = NULL;
9865 MonoInst *thread_ins;
9868 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9870 if (op == CEE_STFLD) {
9878 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9880 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9883 if (op == CEE_STSFLD) {
9891 token = read32 (ip + 1);
9892 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9893 field = mono_method_get_wrapper_data (method, token);
9894 klass = field->parent;
9897 field = mono_field_from_token (image, token, &klass, generic_context);
9901 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9902 FIELD_ACCESS_FAILURE;
9903 mono_class_init (klass);
9905 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9908 /* if the class is Critical then transparent code cannot access its fields */
9909 if (!is_instance && mono_security_core_clr_enabled ())
9910 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9912 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9913 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9914 if (mono_security_core_clr_enabled ())
9915 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9919 * LDFLD etc. is usable on static fields as well, so convert those cases to
9922 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9934 g_assert_not_reached ();
9936 is_instance = FALSE;
9939 context_used = mini_class_check_context_used (cfg, klass);
9943 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9944 if (op == CEE_STFLD) {
9945 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9947 #ifndef DISABLE_REMOTING
9948 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9949 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9950 MonoInst *iargs [5];
9952 GSHAREDVT_FAILURE (op);
9955 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9956 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9957 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9961 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9962 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9963 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9964 CHECK_CFG_EXCEPTION;
9965 g_assert (costs > 0);
9967 cfg->real_offset += 5;
9970 inline_costs += costs;
9972 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9979 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9981 if (mini_is_gsharedvt_klass (cfg, klass)) {
9982 MonoInst *offset_ins;
9984 context_used = mini_class_check_context_used (cfg, klass);
9986 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9987 dreg = alloc_ireg_mp (cfg);
9988 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9989 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9990 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9992 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9994 if (sp [0]->opcode != OP_LDADDR)
9995 store->flags |= MONO_INST_FAULT;
9997 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9998 /* insert call to write barrier */
10002 dreg = alloc_ireg_mp (cfg);
10003 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10004 emit_write_barrier (cfg, ptr, sp [1]);
10007 store->flags |= ins_flag;
10014 #ifndef DISABLE_REMOTING
10015 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10016 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10017 MonoInst *iargs [4];
10019 GSHAREDVT_FAILURE (op);
10021 iargs [0] = sp [0];
10022 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10023 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10024 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10025 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10026 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10027 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10028 CHECK_CFG_EXCEPTION;
10030 g_assert (costs > 0);
10032 cfg->real_offset += 5;
10036 inline_costs += costs;
10038 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10044 if (sp [0]->type == STACK_VTYPE) {
10047 /* Have to compute the address of the variable */
10049 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10051 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10053 g_assert (var->klass == klass);
10055 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10059 if (op == CEE_LDFLDA) {
10060 if (is_magic_tls_access (field)) {
10061 GSHAREDVT_FAILURE (*ip);
10063 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10065 if (sp [0]->type == STACK_OBJ) {
10066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10067 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10070 dreg = alloc_ireg_mp (cfg);
10072 if (mini_is_gsharedvt_klass (cfg, klass)) {
10073 MonoInst *offset_ins;
10075 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10076 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10078 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10080 ins->klass = mono_class_from_mono_type (field->type);
10081 ins->type = STACK_MP;
10087 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10089 if (mini_is_gsharedvt_klass (cfg, klass)) {
10090 MonoInst *offset_ins;
10092 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10093 dreg = alloc_ireg_mp (cfg);
10094 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10095 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10097 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10099 load->flags |= ins_flag;
10100 if (sp [0]->opcode != OP_LDADDR)
10101 load->flags |= MONO_INST_FAULT;
10115 * We can only support shared generic static
10116 * field access on architectures where the
10117 * trampoline code has been extended to handle
10118 * the generic class init.
10120 #ifndef MONO_ARCH_VTABLE_REG
10121 GENERIC_SHARING_FAILURE (op);
10124 context_used = mini_class_check_context_used (cfg, klass);
10126 ftype = mono_field_get_type (field);
10128 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10131 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10132 * to be called here.
10134 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10135 mono_class_vtable (cfg->domain, klass);
10136 CHECK_TYPELOAD (klass);
10138 mono_domain_lock (cfg->domain);
10139 if (cfg->domain->special_static_fields)
10140 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10141 mono_domain_unlock (cfg->domain);
10143 is_special_static = mono_class_field_is_special_static (field);
10145 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10146 thread_ins = mono_get_thread_intrinsic (cfg);
10150 /* Generate IR to compute the field address */
10151 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10153 * Fast access to TLS data
10154 * Inline version of get_thread_static_data () in
10158 int idx, static_data_reg, array_reg, dreg;
10160 GSHAREDVT_FAILURE (op);
10162 // offset &= 0x7fffffff;
10163 // idx = (offset >> 24) - 1;
10164 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10165 MONO_ADD_INS (cfg->cbb, thread_ins);
10166 static_data_reg = alloc_ireg (cfg);
10167 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10169 if (cfg->compile_aot) {
10170 int offset_reg, offset2_reg, idx_reg;
10172 /* For TLS variables, this will return the TLS offset */
10173 EMIT_NEW_SFLDACONST (cfg, ins, field);
10174 offset_reg = ins->dreg;
10175 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10176 idx_reg = alloc_ireg (cfg);
10177 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10178 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10179 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10180 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10181 array_reg = alloc_ireg (cfg);
10182 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10183 offset2_reg = alloc_ireg (cfg);
10184 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10185 dreg = alloc_ireg (cfg);
10186 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10188 offset = (gsize)addr & 0x7fffffff;
10189 idx = (offset >> 24) - 1;
10191 array_reg = alloc_ireg (cfg);
10192 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10193 dreg = alloc_ireg (cfg);
10194 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10196 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10197 (cfg->compile_aot && is_special_static) ||
10198 (context_used && is_special_static)) {
10199 MonoInst *iargs [2];
10201 g_assert (field->parent);
10202 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10203 if (context_used) {
10204 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10205 field, MONO_RGCTX_INFO_CLASS_FIELD);
10207 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10209 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10210 } else if (context_used) {
10211 MonoInst *static_data;
10214 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10215 method->klass->name_space, method->klass->name, method->name,
10216 depth, field->offset);
10219 if (mono_class_needs_cctor_run (klass, method))
10220 emit_generic_class_init (cfg, klass);
10223 * The pointer we're computing here is
10225 * super_info.static_data + field->offset
10227 static_data = emit_get_rgctx_klass (cfg, context_used,
10228 klass, MONO_RGCTX_INFO_STATIC_DATA);
10230 if (mini_is_gsharedvt_klass (cfg, klass)) {
10231 MonoInst *offset_ins;
10233 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10234 dreg = alloc_ireg_mp (cfg);
10235 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10236 } else if (field->offset == 0) {
10239 int addr_reg = mono_alloc_preg (cfg);
10240 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10242 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10243 MonoInst *iargs [2];
10245 g_assert (field->parent);
10246 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10247 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10248 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10250 MonoVTable *vtable = NULL;
10252 if (!cfg->compile_aot)
10253 vtable = mono_class_vtable (cfg->domain, klass);
10254 CHECK_TYPELOAD (klass);
10257 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10258 if (!(g_slist_find (class_inits, klass))) {
10259 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10260 if (cfg->verbose_level > 2)
10261 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10262 class_inits = g_slist_prepend (class_inits, klass);
10265 if (cfg->run_cctors) {
10267 /* This makes it so that inlining cannot trigger */
10268 /* .cctors: too many apps depend on them */
10269 /* running with a specific order... */
10271 if (! vtable->initialized)
10272 INLINE_FAILURE ("class init");
10273 ex = mono_runtime_class_init_full (vtable, FALSE);
10275 set_exception_object (cfg, ex);
10276 goto exception_exit;
10280 if (cfg->compile_aot)
10281 EMIT_NEW_SFLDACONST (cfg, ins, field);
10284 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10286 EMIT_NEW_PCONST (cfg, ins, addr);
10289 MonoInst *iargs [1];
10290 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10291 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10295 /* Generate IR to do the actual load/store operation */
10297 if (op == CEE_LDSFLDA) {
10298 ins->klass = mono_class_from_mono_type (ftype);
10299 ins->type = STACK_PTR;
10301 } else if (op == CEE_STSFLD) {
10304 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10305 store->flags |= ins_flag;
10307 gboolean is_const = FALSE;
10308 MonoVTable *vtable = NULL;
10309 gpointer addr = NULL;
10311 if (!context_used) {
10312 vtable = mono_class_vtable (cfg->domain, klass);
10313 CHECK_TYPELOAD (klass);
10315 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10316 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10317 int ro_type = ftype->type;
10319 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10320 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10321 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10324 GSHAREDVT_FAILURE (op);
10326 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10329 case MONO_TYPE_BOOLEAN:
10331 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10335 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10338 case MONO_TYPE_CHAR:
10340 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10344 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10349 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10353 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10358 case MONO_TYPE_PTR:
10359 case MONO_TYPE_FNPTR:
10360 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10361 type_to_eval_stack_type ((cfg), field->type, *sp);
10364 case MONO_TYPE_STRING:
10365 case MONO_TYPE_OBJECT:
10366 case MONO_TYPE_CLASS:
10367 case MONO_TYPE_SZARRAY:
10368 case MONO_TYPE_ARRAY:
10369 if (!mono_gc_is_moving ()) {
10370 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10371 type_to_eval_stack_type ((cfg), field->type, *sp);
10379 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10384 case MONO_TYPE_VALUETYPE:
10394 CHECK_STACK_OVF (1);
10396 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10397 load->flags |= ins_flag;
10410 token = read32 (ip + 1);
10411 klass = mini_get_class (method, token, generic_context);
10412 CHECK_TYPELOAD (klass);
10413 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10414 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10415 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10416 generic_class_is_reference_type (cfg, klass)) {
10417 /* insert call to write barrier */
10418 emit_write_barrier (cfg, sp [0], sp [1]);
10430 const char *data_ptr;
10432 guint32 field_token;
10438 token = read32 (ip + 1);
10440 klass = mini_get_class (method, token, generic_context);
10441 CHECK_TYPELOAD (klass);
10443 context_used = mini_class_check_context_used (cfg, klass);
10445 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10446 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10447 ins->sreg1 = sp [0]->dreg;
10448 ins->type = STACK_I4;
10449 ins->dreg = alloc_ireg (cfg);
10450 MONO_ADD_INS (cfg->cbb, ins);
10451 *sp = mono_decompose_opcode (cfg, ins);
10454 if (context_used) {
10455 MonoInst *args [3];
10456 MonoClass *array_class = mono_array_class_get (klass, 1);
10457 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10459 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10462 args [0] = emit_get_rgctx_klass (cfg, context_used,
10463 array_class, MONO_RGCTX_INFO_VTABLE);
10468 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10470 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10472 if (cfg->opt & MONO_OPT_SHARED) {
10473 /* Decompose now to avoid problems with references to the domainvar */
10474 MonoInst *iargs [3];
10476 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10477 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10478 iargs [2] = sp [0];
10480 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10482 /* Decompose later since it is needed by abcrem */
10483 MonoClass *array_type = mono_array_class_get (klass, 1);
10484 mono_class_vtable (cfg->domain, array_type);
10485 CHECK_TYPELOAD (array_type);
10487 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10488 ins->dreg = alloc_ireg_ref (cfg);
10489 ins->sreg1 = sp [0]->dreg;
10490 ins->inst_newa_class = klass;
10491 ins->type = STACK_OBJ;
10492 ins->klass = array_type;
10493 MONO_ADD_INS (cfg->cbb, ins);
10494 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10495 cfg->cbb->has_array_access = TRUE;
10497 /* Needed so mono_emit_load_get_addr () gets called */
10498 mono_get_got_var (cfg);
10508 * we inline/optimize the initialization sequence if possible.
10509 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10510 * for small sizes open code the memcpy
10511 * ensure the rva field is big enough
10513 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10514 MonoMethod *memcpy_method = get_memcpy_method ();
10515 MonoInst *iargs [3];
10516 int add_reg = alloc_ireg_mp (cfg);
10518 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10519 if (cfg->compile_aot) {
10520 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10522 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10524 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10525 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10534 if (sp [0]->type != STACK_OBJ)
10537 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10538 ins->dreg = alloc_preg (cfg);
10539 ins->sreg1 = sp [0]->dreg;
10540 ins->type = STACK_I4;
10541 /* This flag will be inherited by the decomposition */
10542 ins->flags |= MONO_INST_FAULT;
10543 MONO_ADD_INS (cfg->cbb, ins);
10544 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10545 cfg->cbb->has_array_access = TRUE;
10553 if (sp [0]->type != STACK_OBJ)
10556 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10558 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10559 CHECK_TYPELOAD (klass);
10560 /* we need to make sure that this array is exactly the type it needs
10561 * to be for correctness. the wrappers are lax with their usage
10562 * so we need to ignore them here
10564 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10565 MonoClass *array_class = mono_array_class_get (klass, 1);
10566 mini_emit_check_array_type (cfg, sp [0], array_class);
10567 CHECK_TYPELOAD (array_class);
10571 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10576 case CEE_LDELEM_I1:
10577 case CEE_LDELEM_U1:
10578 case CEE_LDELEM_I2:
10579 case CEE_LDELEM_U2:
10580 case CEE_LDELEM_I4:
10581 case CEE_LDELEM_U4:
10582 case CEE_LDELEM_I8:
10584 case CEE_LDELEM_R4:
10585 case CEE_LDELEM_R8:
10586 case CEE_LDELEM_REF: {
10592 if (*ip == CEE_LDELEM) {
10594 token = read32 (ip + 1);
10595 klass = mini_get_class (method, token, generic_context);
10596 CHECK_TYPELOAD (klass);
10597 mono_class_init (klass);
10600 klass = array_access_to_klass (*ip);
10602 if (sp [0]->type != STACK_OBJ)
10605 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10607 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10608 // FIXME-VT: OP_ICONST optimization
10609 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10610 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10611 ins->opcode = OP_LOADV_MEMBASE;
10612 } else if (sp [1]->opcode == OP_ICONST) {
10613 int array_reg = sp [0]->dreg;
10614 int index_reg = sp [1]->dreg;
10615 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10617 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10618 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10620 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10621 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10624 if (*ip == CEE_LDELEM)
10631 case CEE_STELEM_I1:
10632 case CEE_STELEM_I2:
10633 case CEE_STELEM_I4:
10634 case CEE_STELEM_I8:
10635 case CEE_STELEM_R4:
10636 case CEE_STELEM_R8:
10637 case CEE_STELEM_REF:
10642 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10644 if (*ip == CEE_STELEM) {
10646 token = read32 (ip + 1);
10647 klass = mini_get_class (method, token, generic_context);
10648 CHECK_TYPELOAD (klass);
10649 mono_class_init (klass);
10652 klass = array_access_to_klass (*ip);
10654 if (sp [0]->type != STACK_OBJ)
10657 emit_array_store (cfg, klass, sp, TRUE);
10659 if (*ip == CEE_STELEM)
10666 case CEE_CKFINITE: {
10670 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10671 ins->sreg1 = sp [0]->dreg;
10672 ins->dreg = alloc_freg (cfg);
10673 ins->type = STACK_R8;
10674 MONO_ADD_INS (bblock, ins);
10676 *sp++ = mono_decompose_opcode (cfg, ins);
10681 case CEE_REFANYVAL: {
10682 MonoInst *src_var, *src;
10684 int klass_reg = alloc_preg (cfg);
10685 int dreg = alloc_preg (cfg);
10687 GSHAREDVT_FAILURE (*ip);
10690 MONO_INST_NEW (cfg, ins, *ip);
10693 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10694 CHECK_TYPELOAD (klass);
10695 mono_class_init (klass);
10697 context_used = mini_class_check_context_used (cfg, klass);
10700 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10702 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10703 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10706 if (context_used) {
10707 MonoInst *klass_ins;
10709 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10710 klass, MONO_RGCTX_INFO_KLASS);
10713 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10714 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10716 mini_emit_class_check (cfg, klass_reg, klass);
10718 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10719 ins->type = STACK_MP;
10724 case CEE_MKREFANY: {
10725 MonoInst *loc, *addr;
10727 GSHAREDVT_FAILURE (*ip);
10730 MONO_INST_NEW (cfg, ins, *ip);
10733 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10734 CHECK_TYPELOAD (klass);
10735 mono_class_init (klass);
10737 context_used = mini_class_check_context_used (cfg, klass);
10739 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10740 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10742 if (context_used) {
10743 MonoInst *const_ins;
10744 int type_reg = alloc_preg (cfg);
10746 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10747 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10750 } else if (cfg->compile_aot) {
10751 int const_reg = alloc_preg (cfg);
10752 int type_reg = alloc_preg (cfg);
10754 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10755 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10756 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10759 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10760 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10764 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10765 ins->type = STACK_VTYPE;
10766 ins->klass = mono_defaults.typed_reference_class;
10771 case CEE_LDTOKEN: {
10773 MonoClass *handle_class;
10775 CHECK_STACK_OVF (1);
10778 n = read32 (ip + 1);
10780 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10781 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10782 handle = mono_method_get_wrapper_data (method, n);
10783 handle_class = mono_method_get_wrapper_data (method, n + 1);
10784 if (handle_class == mono_defaults.typehandle_class)
10785 handle = &((MonoClass*)handle)->byval_arg;
10788 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10792 mono_class_init (handle_class);
10793 if (cfg->generic_sharing_context) {
10794 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10795 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10796 /* This case handles ldtoken
10797 of an open type, like for
10800 } else if (handle_class == mono_defaults.typehandle_class) {
10801 /* If we get a MONO_TYPE_CLASS
10802 then we need to provide the
10804 instantiation of it. */
10805 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10808 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10809 } else if (handle_class == mono_defaults.fieldhandle_class)
10810 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10811 else if (handle_class == mono_defaults.methodhandle_class)
10812 context_used = mini_method_check_context_used (cfg, handle);
10814 g_assert_not_reached ();
10817 if ((cfg->opt & MONO_OPT_SHARED) &&
10818 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10819 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10820 MonoInst *addr, *vtvar, *iargs [3];
10821 int method_context_used;
10823 method_context_used = mini_method_check_context_used (cfg, method);
10825 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10827 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10828 EMIT_NEW_ICONST (cfg, iargs [1], n);
10829 if (method_context_used) {
10830 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10831 method, MONO_RGCTX_INFO_METHOD);
10832 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10834 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10835 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10837 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10841 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10843 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10844 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10845 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10846 (cmethod->klass == mono_defaults.systemtype_class) &&
10847 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10848 MonoClass *tclass = mono_class_from_mono_type (handle);
10850 mono_class_init (tclass);
10851 if (context_used) {
10852 ins = emit_get_rgctx_klass (cfg, context_used,
10853 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10854 } else if (cfg->compile_aot) {
10855 if (method->wrapper_type) {
10856 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10857 /* Special case for static synchronized wrappers */
10858 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10860 /* FIXME: n is not a normal token */
10862 EMIT_NEW_PCONST (cfg, ins, NULL);
10865 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10868 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10870 ins->type = STACK_OBJ;
10871 ins->klass = cmethod->klass;
10874 MonoInst *addr, *vtvar;
10876 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10878 if (context_used) {
10879 if (handle_class == mono_defaults.typehandle_class) {
10880 ins = emit_get_rgctx_klass (cfg, context_used,
10881 mono_class_from_mono_type (handle),
10882 MONO_RGCTX_INFO_TYPE);
10883 } else if (handle_class == mono_defaults.methodhandle_class) {
10884 ins = emit_get_rgctx_method (cfg, context_used,
10885 handle, MONO_RGCTX_INFO_METHOD);
10886 } else if (handle_class == mono_defaults.fieldhandle_class) {
10887 ins = emit_get_rgctx_field (cfg, context_used,
10888 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10890 g_assert_not_reached ();
10892 } else if (cfg->compile_aot) {
10893 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10895 EMIT_NEW_PCONST (cfg, ins, handle);
10897 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10898 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10899 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10909 MONO_INST_NEW (cfg, ins, OP_THROW);
10911 ins->sreg1 = sp [0]->dreg;
10913 bblock->out_of_line = TRUE;
10914 MONO_ADD_INS (bblock, ins);
10915 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10916 MONO_ADD_INS (bblock, ins);
10919 link_bblock (cfg, bblock, end_bblock);
10920 start_new_bblock = 1;
10922 case CEE_ENDFINALLY:
10923 /* mono_save_seq_point_info () depends on this */
10924 if (sp != stack_start)
10925 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10926 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10927 MONO_ADD_INS (bblock, ins);
10929 start_new_bblock = 1;
10932 * Control will leave the method so empty the stack, otherwise
10933 * the next basic block will start with a nonempty stack.
10935 while (sp != stack_start) {
10940 case CEE_LEAVE_S: {
10943 if (*ip == CEE_LEAVE) {
10945 target = ip + 5 + (gint32)read32(ip + 1);
10948 target = ip + 2 + (signed char)(ip [1]);
10951 /* empty the stack */
10952 while (sp != stack_start) {
10957 * If this leave statement is in a catch block, check for a
10958 * pending exception, and rethrow it if necessary.
10959 * We avoid doing this in runtime invoke wrappers, since those are called
10960 * by native code which expects the wrapper to catch all exceptions.
10962 for (i = 0; i < header->num_clauses; ++i) {
10963 MonoExceptionClause *clause = &header->clauses [i];
10966 * Use <= in the final comparison to handle clauses with multiple
10967 * leave statements, like in bug #78024.
10968 * The ordering of the exception clauses guarantees that we find the
10969 * innermost clause.
10971 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10973 MonoBasicBlock *dont_throw;
10978 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10981 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10983 NEW_BBLOCK (cfg, dont_throw);
10986 * Currently, we always rethrow the abort exception, despite the
10987 * fact that this is not correct. See thread6.cs for an example.
10988 * But propagating the abort exception is more important than
10989 * getting the semantics right.
10991 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10992 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10993 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10995 MONO_START_BB (cfg, dont_throw);
11000 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11002 MonoExceptionClause *clause;
11004 for (tmp = handlers; tmp; tmp = tmp->next) {
11005 clause = tmp->data;
11006 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11008 link_bblock (cfg, bblock, tblock);
11009 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11010 ins->inst_target_bb = tblock;
11011 ins->inst_eh_block = clause;
11012 MONO_ADD_INS (bblock, ins);
11013 bblock->has_call_handler = 1;
11014 if (COMPILE_LLVM (cfg)) {
11015 MonoBasicBlock *target_bb;
11018 * Link the finally bblock with the target, since it will
11019 * conceptually branch there.
11020 * FIXME: Have to link the bblock containing the endfinally.
11022 GET_BBLOCK (cfg, target_bb, target);
11023 link_bblock (cfg, tblock, target_bb);
11026 g_list_free (handlers);
11029 MONO_INST_NEW (cfg, ins, OP_BR);
11030 MONO_ADD_INS (bblock, ins);
11031 GET_BBLOCK (cfg, tblock, target);
11032 link_bblock (cfg, bblock, tblock);
11033 ins->inst_target_bb = tblock;
11034 start_new_bblock = 1;
11036 if (*ip == CEE_LEAVE)
11045 * Mono specific opcodes
11047 case MONO_CUSTOM_PREFIX: {
11049 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11053 case CEE_MONO_ICALL: {
11055 MonoJitICallInfo *info;
11057 token = read32 (ip + 2);
11058 func = mono_method_get_wrapper_data (method, token);
11059 info = mono_find_jit_icall_by_addr (func);
11061 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11064 CHECK_STACK (info->sig->param_count);
11065 sp -= info->sig->param_count;
11067 ins = mono_emit_jit_icall (cfg, info->func, sp);
11068 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11072 inline_costs += 10 * num_calls++;
11076 case CEE_MONO_LDPTR: {
11079 CHECK_STACK_OVF (1);
11081 token = read32 (ip + 2);
11083 ptr = mono_method_get_wrapper_data (method, token);
11084 /* FIXME: Generalize this */
11085 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11086 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11091 EMIT_NEW_PCONST (cfg, ins, ptr);
11094 inline_costs += 10 * num_calls++;
11095 /* Can't embed random pointers into AOT code */
11099 case CEE_MONO_JIT_ICALL_ADDR: {
11100 MonoJitICallInfo *callinfo;
11103 CHECK_STACK_OVF (1);
11105 token = read32 (ip + 2);
11107 ptr = mono_method_get_wrapper_data (method, token);
11108 callinfo = mono_find_jit_icall_by_addr (ptr);
11109 g_assert (callinfo);
11110 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11113 inline_costs += 10 * num_calls++;
11116 case CEE_MONO_ICALL_ADDR: {
11117 MonoMethod *cmethod;
11120 CHECK_STACK_OVF (1);
11122 token = read32 (ip + 2);
11124 cmethod = mono_method_get_wrapper_data (method, token);
11126 if (cfg->compile_aot) {
11127 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11129 ptr = mono_lookup_internal_call (cmethod);
11131 EMIT_NEW_PCONST (cfg, ins, ptr);
11137 case CEE_MONO_VTADDR: {
11138 MonoInst *src_var, *src;
11144 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11145 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11150 case CEE_MONO_NEWOBJ: {
11151 MonoInst *iargs [2];
11153 CHECK_STACK_OVF (1);
11155 token = read32 (ip + 2);
11156 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11157 mono_class_init (klass);
11158 NEW_DOMAINCONST (cfg, iargs [0]);
11159 MONO_ADD_INS (cfg->cbb, iargs [0]);
11160 NEW_CLASSCONST (cfg, iargs [1], klass);
11161 MONO_ADD_INS (cfg->cbb, iargs [1]);
11162 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11164 inline_costs += 10 * num_calls++;
11167 case CEE_MONO_OBJADDR:
11170 MONO_INST_NEW (cfg, ins, OP_MOVE);
11171 ins->dreg = alloc_ireg_mp (cfg);
11172 ins->sreg1 = sp [0]->dreg;
11173 ins->type = STACK_MP;
11174 MONO_ADD_INS (cfg->cbb, ins);
11178 case CEE_MONO_LDNATIVEOBJ:
11180 * Similar to LDOBJ, but instead load the unmanaged
11181 * representation of the vtype to the stack.
11186 token = read32 (ip + 2);
11187 klass = mono_method_get_wrapper_data (method, token);
11188 g_assert (klass->valuetype);
11189 mono_class_init (klass);
11192 MonoInst *src, *dest, *temp;
11195 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11196 temp->backend.is_pinvoke = 1;
11197 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11198 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11200 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11201 dest->type = STACK_VTYPE;
11202 dest->klass = klass;
11208 case CEE_MONO_RETOBJ: {
11210 * Same as RET, but return the native representation of a vtype
11213 g_assert (cfg->ret);
11214 g_assert (mono_method_signature (method)->pinvoke);
11219 token = read32 (ip + 2);
11220 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11222 if (!cfg->vret_addr) {
11223 g_assert (cfg->ret_var_is_local);
11225 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11227 EMIT_NEW_RETLOADA (cfg, ins);
11229 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11231 if (sp != stack_start)
11234 MONO_INST_NEW (cfg, ins, OP_BR);
11235 ins->inst_target_bb = end_bblock;
11236 MONO_ADD_INS (bblock, ins);
11237 link_bblock (cfg, bblock, end_bblock);
11238 start_new_bblock = 1;
11242 case CEE_MONO_CISINST:
11243 case CEE_MONO_CCASTCLASS: {
11248 token = read32 (ip + 2);
11249 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11250 if (ip [1] == CEE_MONO_CISINST)
11251 ins = handle_cisinst (cfg, klass, sp [0]);
11253 ins = handle_ccastclass (cfg, klass, sp [0]);
11259 case CEE_MONO_SAVE_LMF:
11260 case CEE_MONO_RESTORE_LMF:
11261 #ifdef MONO_ARCH_HAVE_LMF_OPS
11262 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11263 MONO_ADD_INS (bblock, ins);
11264 cfg->need_lmf_area = TRUE;
11268 case CEE_MONO_CLASSCONST:
11269 CHECK_STACK_OVF (1);
11271 token = read32 (ip + 2);
11272 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11275 inline_costs += 10 * num_calls++;
11277 case CEE_MONO_NOT_TAKEN:
11278 bblock->out_of_line = TRUE;
11281 case CEE_MONO_TLS: {
11284 CHECK_STACK_OVF (1);
11286 key = (gint32)read32 (ip + 2);
11287 g_assert (key < TLS_KEY_NUM);
11289 ins = mono_create_tls_get (cfg, key);
11291 if (cfg->compile_aot) {
11293 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11294 ins->dreg = alloc_preg (cfg);
11295 ins->type = STACK_PTR;
11297 g_assert_not_reached ();
11300 ins->type = STACK_PTR;
11301 MONO_ADD_INS (bblock, ins);
11306 case CEE_MONO_DYN_CALL: {
11307 MonoCallInst *call;
11309 /* It would be easier to call a trampoline, but that would put an
11310 * extra frame on the stack, confusing exception handling. So
11311 * implement it inline using an opcode for now.
11314 if (!cfg->dyn_call_var) {
11315 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11316 /* prevent it from being register allocated */
11317 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11320 /* Has to use a call inst since it local regalloc expects it */
11321 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11322 ins = (MonoInst*)call;
11324 ins->sreg1 = sp [0]->dreg;
11325 ins->sreg2 = sp [1]->dreg;
11326 MONO_ADD_INS (bblock, ins);
11328 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11331 inline_costs += 10 * num_calls++;
11335 case CEE_MONO_MEMORY_BARRIER: {
11337 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11341 case CEE_MONO_JIT_ATTACH: {
11342 MonoInst *args [16];
11343 MonoInst *ad_ins, *lmf_ins;
11344 MonoBasicBlock *next_bb = NULL;
11346 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11348 EMIT_NEW_PCONST (cfg, ins, NULL);
11349 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11355 ad_ins = mono_get_domain_intrinsic (cfg);
11356 lmf_ins = mono_get_lmf_intrinsic (cfg);
11359 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11360 NEW_BBLOCK (cfg, next_bb);
11362 MONO_ADD_INS (cfg->cbb, ad_ins);
11363 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11364 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11366 MONO_ADD_INS (cfg->cbb, lmf_ins);
11367 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11368 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11371 if (cfg->compile_aot) {
11372 /* AOT code is only used in the root domain */
11373 EMIT_NEW_PCONST (cfg, args [0], NULL);
11375 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11377 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11378 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11381 MONO_START_BB (cfg, next_bb);
11387 case CEE_MONO_JIT_DETACH: {
11388 MonoInst *args [16];
11390 /* Restore the original domain */
11391 dreg = alloc_ireg (cfg);
11392 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11393 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11398 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11404 case CEE_PREFIX1: {
11407 case CEE_ARGLIST: {
11408 /* somewhat similar to LDTOKEN */
11409 MonoInst *addr, *vtvar;
11410 CHECK_STACK_OVF (1);
11411 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11413 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11414 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11416 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11417 ins->type = STACK_VTYPE;
11418 ins->klass = mono_defaults.argumenthandle_class;
11431 * The following transforms:
11432 * CEE_CEQ into OP_CEQ
11433 * CEE_CGT into OP_CGT
11434 * CEE_CGT_UN into OP_CGT_UN
11435 * CEE_CLT into OP_CLT
11436 * CEE_CLT_UN into OP_CLT_UN
11438 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11440 MONO_INST_NEW (cfg, ins, cmp->opcode);
11442 cmp->sreg1 = sp [0]->dreg;
11443 cmp->sreg2 = sp [1]->dreg;
11444 type_from_op (cmp, sp [0], sp [1]);
11446 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11447 cmp->opcode = OP_LCOMPARE;
11448 else if (sp [0]->type == STACK_R8)
11449 cmp->opcode = OP_FCOMPARE;
11451 cmp->opcode = OP_ICOMPARE;
11452 MONO_ADD_INS (bblock, cmp);
11453 ins->type = STACK_I4;
11454 ins->dreg = alloc_dreg (cfg, ins->type);
11455 type_from_op (ins, sp [0], sp [1]);
11457 if (cmp->opcode == OP_FCOMPARE) {
11459 * The backends expect the fceq opcodes to do the
11462 cmp->opcode = OP_NOP;
11463 ins->sreg1 = cmp->sreg1;
11464 ins->sreg2 = cmp->sreg2;
11466 MONO_ADD_INS (bblock, ins);
11472 MonoInst *argconst;
11473 MonoMethod *cil_method;
11475 CHECK_STACK_OVF (1);
11477 n = read32 (ip + 2);
11478 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11479 if (!cmethod || mono_loader_get_last_error ())
11481 mono_class_init (cmethod->klass);
11483 mono_save_token_info (cfg, image, n, cmethod);
11485 context_used = mini_method_check_context_used (cfg, cmethod);
11487 cil_method = cmethod;
11488 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11489 METHOD_ACCESS_FAILURE;
11491 if (mono_security_cas_enabled ()) {
11492 if (check_linkdemand (cfg, method, cmethod))
11493 INLINE_FAILURE ("linkdemand");
11494 CHECK_CFG_EXCEPTION;
11495 } else if (mono_security_core_clr_enabled ()) {
11496 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11500 * Optimize the common case of ldftn+delegate creation
11502 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11503 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11504 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11505 MonoInst *target_ins;
11506 MonoMethod *invoke;
11507 int invoke_context_used;
11509 invoke = mono_get_delegate_invoke (ctor_method->klass);
11510 if (!invoke || !mono_method_signature (invoke))
11513 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11515 target_ins = sp [-1];
11517 if (mono_security_core_clr_enabled ())
11518 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11520 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11521 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11522 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11523 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11524 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11528 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11529 /* FIXME: SGEN support */
11530 if (invoke_context_used == 0) {
11532 if (cfg->verbose_level > 3)
11533 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11535 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11536 CHECK_CFG_EXCEPTION;
11545 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11546 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11550 inline_costs += 10 * num_calls++;
11553 case CEE_LDVIRTFTN: {
11554 MonoInst *args [2];
11558 n = read32 (ip + 2);
11559 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11560 if (!cmethod || mono_loader_get_last_error ())
11562 mono_class_init (cmethod->klass);
11564 context_used = mini_method_check_context_used (cfg, cmethod);
11566 if (mono_security_cas_enabled ()) {
11567 if (check_linkdemand (cfg, method, cmethod))
11568 INLINE_FAILURE ("linkdemand");
11569 CHECK_CFG_EXCEPTION;
11570 } else if (mono_security_core_clr_enabled ()) {
11571 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11577 args [1] = emit_get_rgctx_method (cfg, context_used,
11578 cmethod, MONO_RGCTX_INFO_METHOD);
11581 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11583 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11586 inline_costs += 10 * num_calls++;
11590 CHECK_STACK_OVF (1);
11592 n = read16 (ip + 2);
11594 EMIT_NEW_ARGLOAD (cfg, ins, n);
11599 CHECK_STACK_OVF (1);
11601 n = read16 (ip + 2);
11603 NEW_ARGLOADA (cfg, ins, n);
11604 MONO_ADD_INS (cfg->cbb, ins);
11612 n = read16 (ip + 2);
11614 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11616 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11620 CHECK_STACK_OVF (1);
11622 n = read16 (ip + 2);
11624 EMIT_NEW_LOCLOAD (cfg, ins, n);
11629 unsigned char *tmp_ip;
11630 CHECK_STACK_OVF (1);
11632 n = read16 (ip + 2);
11635 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11641 EMIT_NEW_LOCLOADA (cfg, ins, n);
11650 n = read16 (ip + 2);
11652 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11654 emit_stloc_ir (cfg, sp, header, n);
11661 if (sp != stack_start)
11663 if (cfg->method != method)
11665 * Inlining this into a loop in a parent could lead to
11666 * stack overflows which is different behavior than the
11667 * non-inlined case, thus disable inlining in this case.
11669 goto inline_failure;
11671 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11672 ins->dreg = alloc_preg (cfg);
11673 ins->sreg1 = sp [0]->dreg;
11674 ins->type = STACK_PTR;
11675 MONO_ADD_INS (cfg->cbb, ins);
11677 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11679 ins->flags |= MONO_INST_INIT;
11684 case CEE_ENDFILTER: {
11685 MonoExceptionClause *clause, *nearest;
11686 int cc, nearest_num;
11690 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11692 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11693 ins->sreg1 = (*sp)->dreg;
11694 MONO_ADD_INS (bblock, ins);
11695 start_new_bblock = 1;
11700 for (cc = 0; cc < header->num_clauses; ++cc) {
11701 clause = &header->clauses [cc];
11702 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11703 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11704 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11709 g_assert (nearest);
11710 if ((ip - header->code) != nearest->handler_offset)
11715 case CEE_UNALIGNED_:
11716 ins_flag |= MONO_INST_UNALIGNED;
11717 /* FIXME: record alignment? we can assume 1 for now */
11721 case CEE_VOLATILE_:
11722 ins_flag |= MONO_INST_VOLATILE;
11726 ins_flag |= MONO_INST_TAILCALL;
11727 cfg->flags |= MONO_CFG_HAS_TAIL;
11728 /* Can't inline tail calls at this time */
11729 inline_costs += 100000;
11736 token = read32 (ip + 2);
11737 klass = mini_get_class (method, token, generic_context);
11738 CHECK_TYPELOAD (klass);
11739 if (generic_class_is_reference_type (cfg, klass))
11740 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11742 mini_emit_initobj (cfg, *sp, NULL, klass);
11746 case CEE_CONSTRAINED_:
11748 token = read32 (ip + 2);
11749 constrained_call = mini_get_class (method, token, generic_context);
11750 CHECK_TYPELOAD (constrained_call);
11754 case CEE_INITBLK: {
11755 MonoInst *iargs [3];
11759 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11760 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11761 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11762 /* emit_memset only works when val == 0 */
11763 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11765 iargs [0] = sp [0];
11766 iargs [1] = sp [1];
11767 iargs [2] = sp [2];
11768 if (ip [1] == CEE_CPBLK) {
11769 MonoMethod *memcpy_method = get_memcpy_method ();
11770 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11772 MonoMethod *memset_method = get_memset_method ();
11773 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11783 ins_flag |= MONO_INST_NOTYPECHECK;
11785 ins_flag |= MONO_INST_NORANGECHECK;
11786 /* we ignore the no-nullcheck for now since we
11787 * really do it explicitly only when doing callvirt->call
11791 case CEE_RETHROW: {
11793 int handler_offset = -1;
11795 for (i = 0; i < header->num_clauses; ++i) {
11796 MonoExceptionClause *clause = &header->clauses [i];
11797 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11798 handler_offset = clause->handler_offset;
11803 bblock->flags |= BB_EXCEPTION_UNSAFE;
11805 g_assert (handler_offset != -1);
11807 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11808 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11809 ins->sreg1 = load->dreg;
11810 MONO_ADD_INS (bblock, ins);
11812 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11813 MONO_ADD_INS (bblock, ins);
11816 link_bblock (cfg, bblock, end_bblock);
11817 start_new_bblock = 1;
11825 GSHAREDVT_FAILURE (*ip);
11827 CHECK_STACK_OVF (1);
11829 token = read32 (ip + 2);
11830 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11831 MonoType *type = mono_type_create_from_typespec (image, token);
11832 val = mono_type_size (type, &ialign);
11834 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11835 CHECK_TYPELOAD (klass);
11836 mono_class_init (klass);
11837 val = mono_type_size (&klass->byval_arg, &ialign);
11839 EMIT_NEW_ICONST (cfg, ins, val);
11844 case CEE_REFANYTYPE: {
11845 MonoInst *src_var, *src;
11847 GSHAREDVT_FAILURE (*ip);
11853 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11855 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11856 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11857 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11862 case CEE_READONLY_:
11875 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11885 g_warning ("opcode 0x%02x not handled", *ip);
11889 if (start_new_bblock != 1)
11892 bblock->cil_length = ip - bblock->cil_code;
11893 if (bblock->next_bb) {
11894 /* This could already be set because of inlining, #693905 */
11895 MonoBasicBlock *bb = bblock;
11897 while (bb->next_bb)
11899 bb->next_bb = end_bblock;
11901 bblock->next_bb = end_bblock;
11904 if (cfg->method == method && cfg->domainvar) {
11906 MonoInst *get_domain;
11908 cfg->cbb = init_localsbb;
11910 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
11911 MONO_ADD_INS (cfg->cbb, get_domain);
11913 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11915 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11916 MONO_ADD_INS (cfg->cbb, store);
11919 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11920 if (cfg->compile_aot)
11921 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11922 mono_get_got_var (cfg);
11925 if (cfg->method == method && cfg->got_var)
11926 mono_emit_load_got_addr (cfg);
11929 cfg->cbb = init_localsbb;
11931 for (i = 0; i < header->num_locals; ++i) {
11932 emit_init_local (cfg, i, header->locals [i]);
11936 if (cfg->init_ref_vars && cfg->method == method) {
11937 /* Emit initialization for ref vars */
11938 // FIXME: Avoid duplication initialization for IL locals.
11939 for (i = 0; i < cfg->num_varinfo; ++i) {
11940 MonoInst *ins = cfg->varinfo [i];
11942 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11943 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11947 if (cfg->lmf_var && cfg->method == method) {
11948 cfg->cbb = init_localsbb;
11949 emit_push_lmf (cfg);
11953 MonoBasicBlock *bb;
11956 * Make seq points at backward branch targets interruptable.
11958 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11959 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11960 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11963 /* Add a sequence point for method entry/exit events */
11965 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11966 MONO_ADD_INS (init_localsbb, ins);
11967 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11968 MONO_ADD_INS (cfg->bb_exit, ins);
11972 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11973 * the code they refer to was dead (#11880).
11975 if (sym_seq_points) {
11976 for (i = 0; i < header->code_size; ++i) {
11977 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11980 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11981 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11988 if (cfg->method == method) {
11989 MonoBasicBlock *bb;
11990 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11991 bb->region = mono_find_block_region (cfg, bb->real_offset);
11993 mono_create_spvar_for_region (cfg, bb->region);
11994 if (cfg->verbose_level > 2)
11995 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11999 g_slist_free (class_inits);
12000 dont_inline = g_list_remove (dont_inline, method);
12002 if (inline_costs < 0) {
12005 /* Method is too large */
12006 mname = mono_method_full_name (method, TRUE);
12007 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12008 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12010 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12011 mono_basic_block_free (original_bb);
12015 if ((cfg->verbose_level > 2) && (cfg->method == method))
12016 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12018 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12019 mono_basic_block_free (original_bb);
12020 return inline_costs;
12023 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12030 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12034 set_exception_type_from_invalid_il (cfg, method, ip);
12038 g_slist_free (class_inits);
12039 mono_basic_block_free (original_bb);
12040 dont_inline = g_list_remove (dont_inline, method);
12041 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode to the matching *_MEMBASE_IMM
 * variant, so a store of a known-constant register can be folded into an
 * immediate store.
 * NOTE(review): this span is an elided extraction — the return type,
 * `switch (opcode) {` header and closing braces are missing from view;
 * treat the visible lines as fragments, not a complete definition.
 */
12046 store_membase_reg_to_store_membase_imm (int opcode)
12049 case OP_STORE_MEMBASE_REG:
12050 return OP_STORE_MEMBASE_IMM;
12051 case OP_STOREI1_MEMBASE_REG:
12052 return OP_STOREI1_MEMBASE_IMM;
12053 case OP_STOREI2_MEMBASE_REG:
12054 return OP_STOREI2_MEMBASE_IMM;
12055 case OP_STOREI4_MEMBASE_REG:
12056 return OP_STOREI4_MEMBASE_IMM;
12057 case OP_STOREI8_MEMBASE_REG:
12058 return OP_STOREI8_MEMBASE_IMM;
/* Any opcode without a _MEMBASE_IMM form is a caller bug. */
12060 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store opcode to its immediate-operand
 * variant (e.g. an integer add becomes OP_IADD_IMM), used when one source
 * is a constant. x86/amd64-specific membase and push forms are handled
 * under the TARGET_* guards below.
 * NOTE(review): elided extraction — the `case OP_*:` labels for most of
 * the int/long returns, the switch header and the fallthrough return are
 * missing from view.
 */
12067 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops -> *_IMM forms. */
12071 return OP_IADD_IMM;
12073 return OP_ISUB_IMM;
12075 return OP_IDIV_IMM;
12077 return OP_IDIV_UN_IMM;
12079 return OP_IREM_IMM;
12081 return OP_IREM_UN_IMM;
12083 return OP_IMUL_IMM;
12085 return OP_IAND_IMM;
12089 return OP_IXOR_IMM;
12091 return OP_ISHL_IMM;
12093 return OP_ISHR_IMM;
12095 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU ops -> L*_IMM forms. */
12098 return OP_LADD_IMM;
12100 return OP_LSUB_IMM;
12102 return OP_LAND_IMM;
12106 return OP_LXOR_IMM;
12108 return OP_LSHL_IMM;
12110 return OP_LSHR_IMM;
12112 return OP_LSHR_UN_IMM;
/* Compares. */
12115 return OP_COMPARE_IMM;
12117 return OP_ICOMPARE_IMM;
12119 return OP_LCOMPARE_IMM;
/* Register stores -> immediate stores. */
12121 case OP_STORE_MEMBASE_REG:
12122 return OP_STORE_MEMBASE_IMM;
12123 case OP_STOREI1_MEMBASE_REG:
12124 return OP_STOREI1_MEMBASE_IMM;
12125 case OP_STOREI2_MEMBASE_REG:
12126 return OP_STOREI2_MEMBASE_IMM;
12127 case OP_STOREI4_MEMBASE_REG:
12128 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only immediate forms. */
12130 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12132 return OP_X86_PUSH_IMM;
12133 case OP_X86_COMPARE_MEMBASE_REG:
12134 return OP_X86_COMPARE_MEMBASE_IMM;
12136 #if defined(TARGET_AMD64)
12137 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12138 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant register becomes a direct call. */
12140 case OP_VOIDCALL_REG:
12141 return OP_VOIDCALL;
12149 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL indirect-load opcode to the IR OP_LOAD*_MEMBASE
 * opcode of the matching width/signedness. Both CEE_LDIND_I and
 * CEE_LDIND_REF map to the pointer-sized OP_LOAD_MEMBASE.
 * NOTE(review): elided extraction — most `case CEE_LDIND_*:` labels and
 * the switch header are missing from view.
 */
12156 ldind_to_load_membase (int opcode)
12160 return OP_LOADI1_MEMBASE;
12162 return OP_LOADU1_MEMBASE;
12164 return OP_LOADI2_MEMBASE;
12166 return OP_LOADU2_MEMBASE;
12168 return OP_LOADI4_MEMBASE;
12170 return OP_LOADU4_MEMBASE;
12172 return OP_LOAD_MEMBASE;
12173 case CEE_LDIND_REF:
12174 return OP_LOAD_MEMBASE;
12176 return OP_LOADI8_MEMBASE;
12178 return OP_LOADR4_MEMBASE;
12180 return OP_LOADR8_MEMBASE;
/* Unhandled LDIND variants indicate a front-end bug. */
12182 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL indirect-store opcode to the IR
 * OP_STORE*_MEMBASE_REG opcode of the matching width. CEE_STIND_REF maps
 * to the pointer-sized OP_STORE_MEMBASE_REG.
 * NOTE(review): elided extraction — most `case CEE_STIND_*:` labels and
 * the switch header are missing from view.
 */
12189 stind_to_store_membase (int opcode)
12193 return OP_STOREI1_MEMBASE_REG;
12195 return OP_STOREI2_MEMBASE_REG;
12197 return OP_STOREI4_MEMBASE_REG;
12199 case CEE_STIND_REF:
12200 return OP_STORE_MEMBASE_REG;
12202 return OP_STOREI8_MEMBASE_REG;
12204 return OP_STORER4_MEMBASE_REG;
12206 return OP_STORER8_MEMBASE_REG;
/* Unhandled STIND variants indicate a front-end bug. */
12208 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (*_MEMBASE) to an absolute-address load
 * (*_MEM). Only x86/amd64 encode absolute-address loads, hence the
 * TARGET guard; OP_LOADI8_MEM additionally requires 64-bit registers.
 * NOTE(review): elided extraction — the fallback return for unsupported
 * opcodes/targets and the closing #endif/braces are missing from view.
 */
12215 mono_load_membase_to_load_mem (int opcode)
12217 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12218 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12220 case OP_LOAD_MEMBASE:
12221 return OP_LOAD_MEM;
12222 case OP_LOADU1_MEMBASE:
12223 return OP_LOADU1_MEM;
12224 case OP_LOADU2_MEMBASE:
12225 return OP_LOADU2_MEM;
12226 case OP_LOADI4_MEMBASE:
12227 return OP_LOADI4_MEM;
12228 case OP_LOADU4_MEMBASE:
12229 return OP_LOADU4_MEM;
12230 #if SIZEOF_REGISTER == 8
12231 case OP_LOADI8_MEMBASE:
12232 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Fold "load x; alu; store x" sequences into a single x86/amd64
 * read-modify-write memory opcode (e.g. add [basereg+off], reg/imm).
 * STORE_OPCODE is the store that consumes the ALU result; only full-word
 * (and on amd64 also 8-byte) stores are eligible, per the guards below.
 * NOTE(review): elided extraction — the `case OP_*:` labels, the early
 * `return -1` bodies of the guards and the closing braces are missing
 * from view; the visible returns are ADD/SUB/AND/OR/XOR in register and
 * immediate forms for each target.
 */
12241 op_to_op_dest_membase (int store_opcode, int opcode)
12243 #if defined(TARGET_X86)
/* x86: only 32-bit/pointer-sized stores can be folded. */
12244 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12249 return OP_X86_ADD_MEMBASE_REG;
12251 return OP_X86_SUB_MEMBASE_REG;
12253 return OP_X86_AND_MEMBASE_REG;
12255 return OP_X86_OR_MEMBASE_REG;
12257 return OP_X86_XOR_MEMBASE_REG;
12260 return OP_X86_ADD_MEMBASE_IMM;
12263 return OP_X86_SUB_MEMBASE_IMM;
12266 return OP_X86_AND_MEMBASE_IMM;
12269 return OP_X86_OR_MEMBASE_IMM;
12272 return OP_X86_XOR_MEMBASE_IMM;
12278 #if defined(TARGET_AMD64)
/* amd64: 4- and 8-byte stores can be folded. */
12279 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ops reuse the X86_* opcodes; 64-bit ops use AMD64_*. */
12284 return OP_X86_ADD_MEMBASE_REG;
12286 return OP_X86_SUB_MEMBASE_REG;
12288 return OP_X86_AND_MEMBASE_REG;
12290 return OP_X86_OR_MEMBASE_REG;
12292 return OP_X86_XOR_MEMBASE_REG;
12294 return OP_X86_ADD_MEMBASE_IMM;
12296 return OP_X86_SUB_MEMBASE_IMM;
12298 return OP_X86_AND_MEMBASE_IMM;
12300 return OP_X86_OR_MEMBASE_IMM;
12302 return OP_X86_XOR_MEMBASE_IMM;
12304 return OP_AMD64_ADD_MEMBASE_REG;
12306 return OP_AMD64_SUB_MEMBASE_REG;
12308 return OP_AMD64_AND_MEMBASE_REG;
12310 return OP_AMD64_OR_MEMBASE_REG;
12312 return OP_AMD64_XOR_MEMBASE_REG;
12315 return OP_AMD64_ADD_MEMBASE_IMM;
12318 return OP_AMD64_SUB_MEMBASE_IMM;
12321 return OP_AMD64_AND_MEMBASE_IMM;
12324 return OP_AMD64_OR_MEMBASE_IMM;
12327 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result byte store into a single x86/amd64 SETcc that
 * writes straight to memory (only valid for 1-byte stores).
 * NOTE(review): elided extraction — the `case OP_CEQ:`/`case OP_CLT:`...
 * labels between the two ifs, and the fallback return, are missing from
 * view.
 */
12337 op_to_op_store_membase (int store_opcode, int opcode)
12339 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12342 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12343 return OP_X86_SETEQ_MEMBASE;
12345 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12346 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load feeding an instruction's first source register
 * (sreg1) into a memory-operand form of that instruction, e.g.
 * "load; compare" -> cmp [basereg+offset]. LOAD_OPCODE identifies the
 * width/signedness of the feeding load; only the widths each target can
 * encode are accepted.
 * NOTE(review): elided extraction — several `case OP_*:` labels, the
 * `return -1` guard bodies, #else branches and closing braces are
 * missing from view.
 */
12354 op_to_op_src1_membase (int load_opcode, int opcode)
12357 /* FIXME: This has sign extension issues */
/* Special case: unsigned byte load + 32-bit compare-imm -> byte compare. */
12359 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12360 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer/4-byte loads can be folded. */
12363 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12368 return OP_X86_PUSH_MEMBASE;
12369 case OP_COMPARE_IMM:
12370 case OP_ICOMPARE_IMM:
12371 return OP_X86_COMPARE_MEMBASE_IMM;
12374 return OP_X86_COMPARE_MEMBASE_REG;
12378 #ifdef TARGET_AMD64
12379 /* FIXME: This has sign extension issues */
12381 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12382 return OP_X86_COMPARE_MEMBASE8_IMM;
/* ilp32 (x32): pointers are 4 bytes, so 8-byte loads are excluded. */
12387 #ifdef __mono_ilp32__
12388 if (load_opcode == OP_LOADI8_MEMBASE)
12390 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12392 return OP_X86_PUSH_MEMBASE;
/* Disabled: an amd64 compare-imm memory operand only holds 32 bits. */
12394 /* FIXME: This only works for 32 bit immediates
12395 case OP_COMPARE_IMM:
12396 case OP_LCOMPARE_IMM:
12397 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12398 return OP_AMD64_COMPARE_MEMBASE_IMM;
12400 case OP_ICOMPARE_IMM:
12401 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12402 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12406 #ifdef __mono_ilp32__
12407 if (load_opcode == OP_LOAD_MEMBASE)
12408 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12409 if (load_opcode == OP_LOADI8_MEMBASE)
12411 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12413 return OP_AMD64_COMPARE_MEMBASE_REG;
12416 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12417 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load feeding an instruction's second source register
 * (sreg2) into a reg,[mem] form of that instruction (compare/add/sub/
 * and/or/xor), for x86 and amd64. The load width decides between the
 * 32-bit (X86_*/ICOMPARE) and 64-bit (AMD64_*) opcode families.
 * NOTE(review): elided extraction — the `case OP_*:` labels, `return -1`
 * guard bodies, #else branches and closing braces are missing from view.
 */
12426 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/4-byte loads can be folded. */
12429 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12435 return OP_X86_COMPARE_REG_MEMBASE;
12437 return OP_X86_ADD_REG_MEMBASE;
12439 return OP_X86_SUB_REG_MEMBASE;
12441 return OP_X86_AND_REG_MEMBASE;
12443 return OP_X86_OR_REG_MEMBASE;
12445 return OP_X86_XOR_REG_MEMBASE;
12449 #ifdef TARGET_AMD64
/* ilp32 (x32): OP_LOAD_MEMBASE is 4 bytes, so it joins the 32-bit set. */
12450 #ifdef __mono_ilp32__
12451 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12453 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12457 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12459 return OP_X86_ADD_REG_MEMBASE;
12461 return OP_X86_SUB_REG_MEMBASE;
12463 return OP_X86_AND_REG_MEMBASE;
12465 return OP_X86_OR_REG_MEMBASE;
12467 return OP_X86_XOR_REG_MEMBASE;
12469 #ifdef __mono_ilp32__
12470 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12472 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
12477 return OP_AMD64_COMPARE_REG_MEMBASE;
12479 return OP_AMD64_ADD_REG_MEMBASE;
12481 return OP_AMD64_SUB_REG_MEMBASE;
12483 return OP_AMD64_AND_REG_MEMBASE;
12485 return OP_AMD64_OR_REG_MEMBASE;
12487 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion for opcodes
 * that this architecture emulates in software (long shifts on 32-bit
 * without native support, mul/div/rem under MONO_ARCH_EMULATE_*), since
 * the emulation helpers take register operands.
 * NOTE(review): elided extraction — the `case OP_*:`/`return -1` bodies
 * inside each #if group are missing from view; only the guards and the
 * final delegation remain.
 */
12496 mono_op_to_op_imm_noemul (int opcode)
12499 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12505 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12512 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Not emulated here: safe to use the immediate variant. */
12517 return mono_op_to_op_imm (opcode);
12522 * mono_handle_global_vregs:
12524 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Pass over all bblocks: any vreg referenced from more than one bblock
 * is promoted to a 'global' variable (a MonoInst in cfg->varinfo), while
 * variables touched by only one bblock are demoted back to plain local
 * vregs; finally the varinfo/vars tables are compacted to drop dead
 * entries.
 * NOTE(review): this span is an elided extraction — switch headers,
 * several case labels, `continue`/`break` statements and closing braces
 * are missing from view; the comments below describe only what the
 * visible lines establish.
 */
12528 mono_handle_global_vregs (MonoCompile *cfg)
12530 gint32 *vreg_to_bb;
12531 MonoBasicBlock *bb;
/*
 * vreg -> (block_num + 1) of the single bblock using it, or -1 once it
 * is seen in a second bblock (0 means "not seen yet").
 * NOTE(review): `sizeof (gint32*)` looks like it should be
 * `sizeof (gint32)`, and the `+ 1` binds to the byte count, not the
 * element count — over-allocates on 64-bit, under-parenthesized on
 * 32-bit; confirm against upstream before relying on it.
 */
12534 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12536 #ifdef MONO_ARCH_SIMD_INTRINSICS
12537 if (cfg->uses_simd_intrinsics)
12538 mono_simd_simplify_indirection (cfg);
12541 /* Find local vregs used in more than one bb */
12542 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12543 MonoInst *ins = bb->code;
12544 int block_num = bb->block_num;
12546 if (cfg->verbose_level > 2)
12547 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12550 for (; ins; ins = ins->next) {
12551 const char *spec = INS_INFO (ins->opcode);
12552 int regtype = 0, regindex;
12555 if (G_UNLIKELY (cfg->verbose_level > 2))
12556 mono_print_ins (ins);
/* By this point the CIL opcodes must all have been lowered to IR. */
12558 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (index 0) and sreg1..sreg3 (indices 1-3) of INS. */
12560 for (regindex = 0; regindex < 4; regindex ++) {
12563 if (regindex == 0) {
12564 regtype = spec [MONO_INST_DEST];
12565 if (regtype == ' ')
12568 } else if (regindex == 1) {
12569 regtype = spec [MONO_INST_SRC1];
12570 if (regtype == ' ')
12573 } else if (regindex == 2) {
12574 regtype = spec [MONO_INST_SRC2];
12575 if (regtype == ' ')
12578 } else if (regindex == 3) {
12579 regtype = spec [MONO_INST_SRC3];
12580 if (regtype == ' ')
12585 #if SIZEOF_REGISTER == 4
12586 /* In the LLVM case, the long opcodes are not decomposed */
12587 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12589 * Since some instructions reference the original long vreg,
12590 * and some reference the two component vregs, it is quite hard
12591 * to determine when it needs to be global. So be conservative.
12593 if (!get_vreg_to_inst (cfg, vreg)) {
12594 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg)
12596 if (cfg->verbose_level > 2)
12597 printf ("LONG VREG R%d made global.\n", vreg);
12601 * Make the component vregs volatile since the optimizations can
12602 * get confused otherwise.
12604 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12605 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12609 g_assert (vreg != -1);
12611 prev_bb = vreg_to_bb [vreg];
12612 if (prev_bb == 0) {
12613 /* 0 is a valid block num */
12614 vreg_to_bb [vreg] = block_num + 1;
12615 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are implicitly global; nothing to create. */
12616 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12619 if (!get_vreg_to_inst (cfg, vreg)) {
12620 if (G_UNLIKELY (cfg->verbose_level > 2))
12621 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the right type for the promoted vreg. */
12625 if (vreg_is_ref (cfg, vreg))
12626 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12628 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12631 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12634 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12637 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12640 g_assert_not_reached ();
12644 /* Flag as having been used in more than one bb */
12645 vreg_to_bb [vreg] = -1;
12651 /* If a variable is used in only one bblock, convert it into a local vreg */
12652 for (i = 0; i < cfg->num_varinfo; i++) {
12653 MonoInst *var = cfg->varinfo [i];
12654 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12656 switch (var->type) {
12662 #if SIZEOF_REGISTER == 8
12665 #if !defined(TARGET_X86)
12666 /* Enabling this screws up the fp stack on x86 */
12669 if (mono_arch_is_soft_float ())
12672 /* Arguments are implicitly global */
12673 /* Putting R4 vars into registers doesn't work currently */
12674 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12675 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12677 * Make that the variable's liveness interval doesn't contain a call, since
12678 * that would cause the lvreg to be spilled, making the whole optimization
12681 /* This is too slow for JIT compilation */
12683 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12685 int def_index, call_index, ins_index;
12686 gboolean spilled = FALSE;
12691 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12692 const char *spec = INS_INFO (ins->opcode);
12694 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12695 def_index = ins_index;
/*
 * NOTE(review): both clauses of this || test SRC1/sreg1 — the second
 * clause is a duplicate and almost certainly should test
 * spec [MONO_INST_SRC2] / ins->sreg2, otherwise uses through sreg2 are
 * never seen by the spill check. Confirm against upstream and fix.
 */
12697 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12698 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12699 if (call_index > def_index) {
12705 if (MONO_IS_CALL (ins))
12706 call_index = ins_index;
12716 if (G_UNLIKELY (cfg->verbose_level > 2))
12717 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and detach it from its vreg. */
12718 var->flags |= MONO_INST_IS_DEAD;
12719 cfg->vreg_to_inst [var->dreg] = NULL;
12726 * Compress the varinfo and vars tables so the liveness computation is faster and
12727 * takes up less space.
12730 for (i = 0; i < cfg->num_varinfo; ++i) {
12731 MonoInst *var = cfg->varinfo [i];
12732 if (pos < i && cfg->locals_start == i)
12733 cfg->locals_start = pos;
12734 if (!(var->flags & MONO_INST_IS_DEAD)) {
12736 cfg->varinfo [pos] = cfg->varinfo [i];
12737 cfg->varinfo [pos]->inst_c0 = pos;
12738 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12739 cfg->vars [pos].idx = pos;
12740 #if SIZEOF_REGISTER == 4
12741 if (cfg->varinfo [pos]->type == STACK_I8) {
12742 /* Modify the two component vars too */
12745 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12746 var1->inst_c0 = pos;
12747 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12748 var1->inst_c0 = pos;
12755 cfg->num_varinfo = pos;
12756 if (cfg->locals_start > cfg->num_varinfo)
12757 cfg->locals_start = cfg->num_varinfo;
12761 * mono_spill_global_vars:
12763 * Generate spill code for variables which are not allocated to registers,
12764 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12765 * code is generated which could be optimized by the local optimization passes.
12768 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12770 MonoBasicBlock *bb;
12772 int orig_next_vreg;
12773 guint32 *vreg_to_lvreg;
12775 guint32 i, lvregs_len;
12776 gboolean dest_has_lvreg = FALSE;
12777 guint32 stacktypes [128];
12778 MonoInst **live_range_start, **live_range_end;
12779 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12780 int *gsharedvt_vreg_to_idx = NULL;
12782 *need_local_opts = FALSE;
12784 memset (spec2, 0, sizeof (spec2));
12786 /* FIXME: Move this function to mini.c */
12787 stacktypes ['i'] = STACK_PTR;
12788 stacktypes ['l'] = STACK_I8;
12789 stacktypes ['f'] = STACK_R8;
12790 #ifdef MONO_ARCH_SIMD_INTRINSICS
12791 stacktypes ['x'] = STACK_VTYPE;
12794 #if SIZEOF_REGISTER == 4
12795 /* Create MonoInsts for longs */
12796 for (i = 0; i < cfg->num_varinfo; i++) {
12797 MonoInst *ins = cfg->varinfo [i];
12799 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12800 switch (ins->type) {
12805 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12808 g_assert (ins->opcode == OP_REGOFFSET);
12810 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12812 tree->opcode = OP_REGOFFSET;
12813 tree->inst_basereg = ins->inst_basereg;
12814 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12816 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12818 tree->opcode = OP_REGOFFSET;
12819 tree->inst_basereg = ins->inst_basereg;
12820 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12830 if (cfg->compute_gc_maps) {
12831 /* registers need liveness info even for !non refs */
12832 for (i = 0; i < cfg->num_varinfo; i++) {
12833 MonoInst *ins = cfg->varinfo [i];
12835 if (ins->opcode == OP_REGVAR)
12836 ins->flags |= MONO_INST_GC_TRACK;
12840 if (cfg->gsharedvt) {
12841 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
12843 for (i = 0; i < cfg->num_varinfo; ++i) {
12844 MonoInst *ins = cfg->varinfo [i];
12847 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12848 if (i >= cfg->locals_start) {
12850 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12851 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12852 ins->opcode = OP_GSHAREDVT_LOCAL;
12853 ins->inst_imm = idx;
12856 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12857 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12863 /* FIXME: widening and truncation */
12866 * As an optimization, when a variable allocated to the stack is first loaded into
12867 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12868 * the variable again.
12870 orig_next_vreg = cfg->next_vreg;
12871 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12872 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12876 * These arrays contain the first and last instructions accessing a given
12878 * Since we emit bblocks in the same order we process them here, and we
12879 * don't split live ranges, these will precisely describe the live range of
12880 * the variable, i.e. the instruction range where a valid value can be found
12881 * in the variables location.
12882 * The live range is computed using the liveness info computed by the liveness pass.
12883 * We can't use vmv->range, since that is an abstract live range, and we need
12884 * one which is instruction precise.
12885 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12887 /* FIXME: Only do this if debugging info is requested */
12888 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12889 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12890 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12891 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12893 /* Add spill loads/stores */
12894 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12897 if (cfg->verbose_level > 2)
12898 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12900 /* Clear vreg_to_lvreg array */
12901 for (i = 0; i < lvregs_len; i++)
12902 vreg_to_lvreg [lvregs [i]] = 0;
12906 MONO_BB_FOR_EACH_INS (bb, ins) {
12907 const char *spec = INS_INFO (ins->opcode);
12908 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12909 gboolean store, no_lvreg;
12910 int sregs [MONO_MAX_SRC_REGS];
12912 if (G_UNLIKELY (cfg->verbose_level > 2))
12913 mono_print_ins (ins);
12915 if (ins->opcode == OP_NOP)
12919 * We handle LDADDR here as well, since it can only be decomposed
12920 * when variable addresses are known.
12922 if (ins->opcode == OP_LDADDR) {
12923 MonoInst *var = ins->inst_p0;
12925 if (var->opcode == OP_VTARG_ADDR) {
12926 /* Happens on SPARC/S390 where vtypes are passed by reference */
12927 MonoInst *vtaddr = var->inst_left;
12928 if (vtaddr->opcode == OP_REGVAR) {
12929 ins->opcode = OP_MOVE;
12930 ins->sreg1 = vtaddr->dreg;
12932 else if (var->inst_left->opcode == OP_REGOFFSET) {
12933 ins->opcode = OP_LOAD_MEMBASE;
12934 ins->inst_basereg = vtaddr->inst_basereg;
12935 ins->inst_offset = vtaddr->inst_offset;
12938 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12939 /* gsharedvt arg passed by ref */
12940 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12942 ins->opcode = OP_LOAD_MEMBASE;
12943 ins->inst_basereg = var->inst_basereg;
12944 ins->inst_offset = var->inst_offset;
12945 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12946 MonoInst *load, *load2, *load3;
12947 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12948 int reg1, reg2, reg3;
12949 MonoInst *info_var = cfg->gsharedvt_info_var;
12950 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12954 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12957 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12959 g_assert (info_var);
12960 g_assert (locals_var);
12962 /* Mark the instruction used to compute the locals var as used */
12963 cfg->gsharedvt_locals_var_ins = NULL;
12965 /* Load the offset */
12966 if (info_var->opcode == OP_REGOFFSET) {
12967 reg1 = alloc_ireg (cfg);
12968 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12969 } else if (info_var->opcode == OP_REGVAR) {
12971 reg1 = info_var->dreg;
12973 g_assert_not_reached ();
12975 reg2 = alloc_ireg (cfg);
12976 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12977 /* Load the locals area address */
12978 reg3 = alloc_ireg (cfg);
12979 if (locals_var->opcode == OP_REGOFFSET) {
12980 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12981 } else if (locals_var->opcode == OP_REGVAR) {
12982 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12984 g_assert_not_reached ();
12986 /* Compute the address */
12987 ins->opcode = OP_PADD;
12991 mono_bblock_insert_before_ins (bb, ins, load3);
12992 mono_bblock_insert_before_ins (bb, load3, load2);
12994 mono_bblock_insert_before_ins (bb, load2, load);
12996 g_assert (var->opcode == OP_REGOFFSET);
12998 ins->opcode = OP_ADD_IMM;
12999 ins->sreg1 = var->inst_basereg;
13000 ins->inst_imm = var->inst_offset;
13003 *need_local_opts = TRUE;
13004 spec = INS_INFO (ins->opcode);
13007 if (ins->opcode < MONO_CEE_LAST) {
13008 mono_print_ins (ins);
13009 g_assert_not_reached ();
13013 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13017 if (MONO_IS_STORE_MEMBASE (ins)) {
13018 tmp_reg = ins->dreg;
13019 ins->dreg = ins->sreg2;
13020 ins->sreg2 = tmp_reg;
13023 spec2 [MONO_INST_DEST] = ' ';
13024 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13025 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13026 spec2 [MONO_INST_SRC3] = ' ';
13028 } else if (MONO_IS_STORE_MEMINDEX (ins))
13029 g_assert_not_reached ();
13034 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13035 printf ("\t %.3s %d", spec, ins->dreg);
13036 num_sregs = mono_inst_get_src_registers (ins, sregs);
13037 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13038 printf (" %d", sregs [srcindex]);
13045 regtype = spec [MONO_INST_DEST];
13046 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13049 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13050 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13051 MonoInst *store_ins;
13053 MonoInst *def_ins = ins;
13054 int dreg = ins->dreg; /* The original vreg */
13056 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13058 if (var->opcode == OP_REGVAR) {
13059 ins->dreg = var->dreg;
13060 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13062 * Instead of emitting a load+store, use a _membase opcode.
13064 g_assert (var->opcode == OP_REGOFFSET);
13065 if (ins->opcode == OP_MOVE) {
13069 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13070 ins->inst_basereg = var->inst_basereg;
13071 ins->inst_offset = var->inst_offset;
13074 spec = INS_INFO (ins->opcode);
13078 g_assert (var->opcode == OP_REGOFFSET);
13080 prev_dreg = ins->dreg;
13082 /* Invalidate any previous lvreg for this vreg */
13083 vreg_to_lvreg [ins->dreg] = 0;
13087 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13089 store_opcode = OP_STOREI8_MEMBASE_REG;
13092 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13094 #if SIZEOF_REGISTER != 8
13095 if (regtype == 'l') {
13096 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13097 mono_bblock_insert_after_ins (bb, ins, store_ins);
13098 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13099 mono_bblock_insert_after_ins (bb, ins, store_ins);
13100 def_ins = store_ins;
13105 g_assert (store_opcode != OP_STOREV_MEMBASE);
13107 /* Try to fuse the store into the instruction itself */
13108 /* FIXME: Add more instructions */
13109 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13110 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13111 ins->inst_imm = ins->inst_c0;
13112 ins->inst_destbasereg = var->inst_basereg;
13113 ins->inst_offset = var->inst_offset;
13114 spec = INS_INFO (ins->opcode);
13115 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13116 ins->opcode = store_opcode;
13117 ins->inst_destbasereg = var->inst_basereg;
13118 ins->inst_offset = var->inst_offset;
13122 tmp_reg = ins->dreg;
13123 ins->dreg = ins->sreg2;
13124 ins->sreg2 = tmp_reg;
13127 spec2 [MONO_INST_DEST] = ' ';
13128 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13129 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13130 spec2 [MONO_INST_SRC3] = ' ';
13132 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13133 // FIXME: The backends expect the base reg to be in inst_basereg
13134 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13136 ins->inst_basereg = var->inst_basereg;
13137 ins->inst_offset = var->inst_offset;
13138 spec = INS_INFO (ins->opcode);
13140 /* printf ("INS: "); mono_print_ins (ins); */
13141 /* Create a store instruction */
13142 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13144 /* Insert it after the instruction */
13145 mono_bblock_insert_after_ins (bb, ins, store_ins);
13147 def_ins = store_ins;
13150 * We can't assign ins->dreg to var->dreg here, since the
13151 * sregs could use it. So set a flag, and do it after
13154 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13155 dest_has_lvreg = TRUE;
13160 if (def_ins && !live_range_start [dreg]) {
13161 live_range_start [dreg] = def_ins;
13162 live_range_start_bb [dreg] = bb;
13165 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13168 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13169 tmp->inst_c1 = dreg;
13170 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13177 num_sregs = mono_inst_get_src_registers (ins, sregs);
13178 for (srcindex = 0; srcindex < 3; ++srcindex) {
13179 regtype = spec [MONO_INST_SRC1 + srcindex];
13180 sreg = sregs [srcindex];
13182 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13183 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13184 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13185 MonoInst *use_ins = ins;
13186 MonoInst *load_ins;
13187 guint32 load_opcode;
13189 if (var->opcode == OP_REGVAR) {
13190 sregs [srcindex] = var->dreg;
13191 //mono_inst_set_src_registers (ins, sregs);
13192 live_range_end [sreg] = use_ins;
13193 live_range_end_bb [sreg] = bb;
13195 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13198 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13199 /* var->dreg is a hreg */
13200 tmp->inst_c1 = sreg;
13201 mono_bblock_insert_after_ins (bb, ins, tmp);
13207 g_assert (var->opcode == OP_REGOFFSET);
13209 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13211 g_assert (load_opcode != OP_LOADV_MEMBASE);
13213 if (vreg_to_lvreg [sreg]) {
13214 g_assert (vreg_to_lvreg [sreg] != -1);
13216 /* The variable is already loaded to an lvreg */
13217 if (G_UNLIKELY (cfg->verbose_level > 2))
13218 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13219 sregs [srcindex] = vreg_to_lvreg [sreg];
13220 //mono_inst_set_src_registers (ins, sregs);
13224 /* Try to fuse the load into the instruction */
13225 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13226 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13227 sregs [0] = var->inst_basereg;
13228 //mono_inst_set_src_registers (ins, sregs);
13229 ins->inst_offset = var->inst_offset;
13230 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13231 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13232 sregs [1] = var->inst_basereg;
13233 //mono_inst_set_src_registers (ins, sregs);
13234 ins->inst_offset = var->inst_offset;
13236 if (MONO_IS_REAL_MOVE (ins)) {
13237 ins->opcode = OP_NOP;
13240 //printf ("%d ", srcindex); mono_print_ins (ins);
13242 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13244 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13245 if (var->dreg == prev_dreg) {
13247 * sreg refers to the value loaded by the load
13248 * emitted below, but we need to use ins->dreg
13249 * since it refers to the store emitted earlier.
13253 g_assert (sreg != -1);
13254 vreg_to_lvreg [var->dreg] = sreg;
13255 g_assert (lvregs_len < 1024);
13256 lvregs [lvregs_len ++] = var->dreg;
13260 sregs [srcindex] = sreg;
13261 //mono_inst_set_src_registers (ins, sregs);
13263 #if SIZEOF_REGISTER != 8
13264 if (regtype == 'l') {
13265 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13266 mono_bblock_insert_before_ins (bb, ins, load_ins);
13267 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13268 mono_bblock_insert_before_ins (bb, ins, load_ins);
13269 use_ins = load_ins;
13274 #if SIZEOF_REGISTER == 4
13275 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13277 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13278 mono_bblock_insert_before_ins (bb, ins, load_ins);
13279 use_ins = load_ins;
13283 if (var->dreg < orig_next_vreg) {
13284 live_range_end [var->dreg] = use_ins;
13285 live_range_end_bb [var->dreg] = bb;
13288 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13291 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13292 tmp->inst_c1 = var->dreg;
13293 mono_bblock_insert_after_ins (bb, ins, tmp);
13297 mono_inst_set_src_registers (ins, sregs);
13299 if (dest_has_lvreg) {
13300 g_assert (ins->dreg != -1);
13301 vreg_to_lvreg [prev_dreg] = ins->dreg;
13302 g_assert (lvregs_len < 1024);
13303 lvregs [lvregs_len ++] = prev_dreg;
13304 dest_has_lvreg = FALSE;
13308 tmp_reg = ins->dreg;
13309 ins->dreg = ins->sreg2;
13310 ins->sreg2 = tmp_reg;
13313 if (MONO_IS_CALL (ins)) {
13314 /* Clear vreg_to_lvreg array */
13315 for (i = 0; i < lvregs_len; i++)
13316 vreg_to_lvreg [lvregs [i]] = 0;
13318 } else if (ins->opcode == OP_NOP) {
13320 MONO_INST_NULLIFY_SREGS (ins);
13323 if (cfg->verbose_level > 2)
13324 mono_print_ins_index (1, ins);
13327 /* Extend the live range based on the liveness info */
13328 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13329 for (i = 0; i < cfg->num_varinfo; i ++) {
13330 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13332 if (vreg_is_volatile (cfg, vi->vreg))
13333 /* The liveness info is incomplete */
13336 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13337 /* Live from at least the first ins of this bb */
13338 live_range_start [vi->vreg] = bb->code;
13339 live_range_start_bb [vi->vreg] = bb;
13342 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13343 /* Live at least until the last ins of this bb */
13344 live_range_end [vi->vreg] = bb->last_ins;
13345 live_range_end_bb [vi->vreg] = bb;
13351 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13353 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13354 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13356 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13357 for (i = 0; i < cfg->num_varinfo; ++i) {
13358 int vreg = MONO_VARINFO (cfg, i)->vreg;
13361 if (live_range_start [vreg]) {
13362 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13364 ins->inst_c1 = vreg;
13365 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13367 if (live_range_end [vreg]) {
13368 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13370 ins->inst_c1 = vreg;
13371 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13372 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13374 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13380 if (cfg->gsharedvt_locals_var_ins) {
13381 /* Nullify if unused */
13382 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13383 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13386 g_free (live_range_start);
13387 g_free (live_range_end);
13388 g_free (live_range_start_bb);
13389 g_free (live_range_end_bb);
13394 * - use 'iadd' instead of 'int_add'
13395 * - handling ovf opcodes: decompose in method_to_ir.
13396 * - unify iregs/fregs
13397 * -> partly done, the missing parts are:
13398 * - a more complete unification would involve unifying the hregs as well, so
13399 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13400 * would no longer map to the machine hregs, so the code generators would need to
13401 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13402 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13403 * fp/non-fp branches speeds it up by about 15%.
13404 * - use sext/zext opcodes instead of shifts
13406 * - get rid of TEMPLOADs if possible and use vregs instead
13407 * - clean up usage of OP_P/OP_ opcodes
13408 * - cleanup usage of DUMMY_USE
13409 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13411 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13412 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13413 * - make sure handle_stack_args () is called before the branch is emitted
13414 * - when the new IR is done, get rid of all unused stuff
13415 * - COMPARE/BEQ as separate instructions or unify them ?
13416 * - keeping them separate allows specialized compare instructions like
13417 * compare_imm, compare_membase
13418 * - most back ends unify fp compare+branch, fp compare+ceq
13419 * - integrate mono_save_args into inline_method
13420 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13421 * - handle long shift opts on 32 bit platforms somehow: they require
13422 * 3 sregs (2 for arg1 and 1 for arg2)
13423 * - make byref a 'normal' type.
13424 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13425 * variable if needed.
13426 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13427 * like inline_method.
13428 * - remove inlining restrictions
13429 * - fix LNEG and enable cfold of INEG
13430 * - generalize x86 optimizations like ldelema as a peephole optimization
13431 * - add store_mem_imm for amd64
13432 * - optimize the loading of the interruption flag in the managed->native wrappers
13433 * - avoid special handling of OP_NOP in passes
13434 * - move code inserting instructions into one function/macro.
13435 * - try a coalescing phase after liveness analysis
13436 * - add float -> vreg conversion + local optimizations on !x86
13437 * - figure out how to handle decomposed branches during optimizations, ie.
13438 * compare+branch, op_jump_table+op_br etc.
13439 * - promote RuntimeXHandles to vregs
13440 * - vtype cleanups:
13441 * - add a NEW_VARLOADA_VREG macro
13442 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13443 * accessing vtype fields.
13444 * - get rid of I8CONST on 64 bit platforms
13445 * - dealing with the increase in code size due to branches created during opcode
13447 * - use extended basic blocks
13448 * - all parts of the JIT
13449 * - handle_global_vregs () && local regalloc
13450 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13451 * - sources of increase in code size:
13454 * - isinst and castclass
13455 * - lvregs not allocated to global registers even if used multiple times
13456 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13458 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13459 * - add all micro optimizations from the old JIT
13460 * - put tree optimizations into the deadce pass
13461 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13462 * specific function.
13463 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13464 * fcompare + branchCC.
13465 * - create a helper function for allocating a stack slot, taking into account
13466 * MONO_CFG_HAS_SPILLUP.
13468 * - merge the ia64 switch changes.
13469 * - optimize mono_regstate2_alloc_int/float.
13470 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13471 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13472 * parts of the tree could be separated by other instructions, killing the tree
13473 * arguments, or stores killing loads etc. Also, should we fold loads into other
13474 * instructions if the result of the load is used multiple times ?
13475 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13476 * - LAST MERGE: 108395.
13477 * - when returning vtypes in registers, generate IR and append it to the end of the
13478 * last bb instead of doing it in the epilog.
13479 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13487 - When to decompose opcodes:
13488 - earlier: this makes some optimizations hard to implement, since the low level IR
13489 no longer contains the necessary information. But it is easier to do.
13490 - later: harder to implement, enables more optimizations.
13491 - Branches inside bblocks:
13492 - created when decomposing complex opcodes.
13493 - branches to another bblock: harmless, but not tracked by the branch
13494 optimizations, so need to branch to a label at the start of the bblock.
13495 - branches to inside the same bblock: very problematic, trips up the local
13496 reg allocator. Can be fixed by splitting the current bblock, but that is a
13497 complex operation, since some local vregs can become global vregs etc.
13498 - Local/global vregs:
13499 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13500 local register allocator.
13501 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13502 structure, created by mono_create_var (). Assigned to hregs or the stack by
13503 the global register allocator.
13504 - When to do optimizations like alu->alu_imm:
13505 - earlier -> saves work later on since the IR will be smaller/simpler
13506 - later -> can work on more instructions
13507 - Handling of valuetypes:
13508 - When a vtype is pushed on the stack, a new temporary is created, an
13509 instruction computing its address (LDADDR) is emitted and pushed on
13510 the stack. Need to optimize cases when the vtype is used immediately as in
13511 argument passing, stloc etc.
13512 - Instead of the to_end stuff in the old JIT, simply call the function handling
13513 the values on the stack before emitting the last instruction of the bb.
13516 #endif /* DISABLE_JIT */