2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Inliner tuning knobs: estimated cost of a branch, and the maximum IL size considered for inlining. */
#define BRANCH_COST 10
#define INLINE_LENGTH_LIMIT 20
/* Abort inlining of the current callee (only meaningful while inlining a non-wrapper). */
#define INLINE_FAILURE(msg) do { \
	if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
		if (cfg->verbose_level >= 2) \
			printf ("inline failed: %s\n", msg); \
		goto inline_failure; \
	} \
	} while (0)
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException on CFG and abort compilation of this method. */
#define METHOD_ACCESS_FAILURE do { \
	char *method_fname = mono_method_full_name (method, TRUE); \
	char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
	cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
	g_free (method_fname); \
	g_free (cil_method_fname); \
	goto exception_exit; \
	} while (0)
/* Record a FieldAccessException on CFG and abort compilation of this method. */
#define FIELD_ACCESS_FAILURE do { \
	char *method_fname = mono_method_full_name (method, TRUE); \
	char *field_fname = mono_field_full_name (field); \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
	cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
	g_free (method_fname); \
	g_free (field_fname); \
	goto exception_exit; \
	} while (0)
/* When compiling a generic-shared method, mark sharing as failed for OPCODE and bail out. */
#define GENERIC_SHARING_FAILURE(opcode) do { \
	if (cfg->generic_sharing_context) { \
		if (cfg->verbose_level > 2) \
			printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	} \
	} while (0)
/* When compiling a gsharedvt method, mark sharing as failed for OPCODE and bail out. */
#define GSHAREDVT_FAILURE(opcode) do { \
	if (cfg->gsharedvt) { \
		cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
		if (cfg->verbose_level >= 2) \
			printf ("%s\n", cfg->exception_message); \
		mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
		goto exception_exit; \
	} \
	} while (0)
/* Record an OutOfMemoryException on CFG and abort compilation. */
#define OUT_OF_MEMORY_FAILURE do { \
	mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
	goto exception_exit; \
	} while (0)
/* Mark the method being compiled as not AOT-able, logging the call site when verbose. */
#define DISABLE_AOT(cfg) do { \
	if ((cfg)->verbose_level >= 2) \
		printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
	(cfg)->disable_aot = TRUE; \
	} while (0)
/* Determine whether 'ins' represents a load of the 'this' argument */
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_replace_type (type);
275 switch (type->type) {
278 case MONO_TYPE_BOOLEAN:
290 case MONO_TYPE_FNPTR:
292 case MONO_TYPE_CLASS:
293 case MONO_TYPE_STRING:
294 case MONO_TYPE_OBJECT:
295 case MONO_TYPE_SZARRAY:
296 case MONO_TYPE_ARRAY:
300 #if SIZEOF_REGISTER == 8
309 case MONO_TYPE_VALUETYPE:
310 if (type->data.klass->enumtype) {
311 type = mono_class_enum_basetype (type->data.klass);
314 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
317 case MONO_TYPE_TYPEDBYREF:
319 case MONO_TYPE_GENERICINST:
320 type = &type->data.generic_class->container_class->byval_arg;
324 g_assert (cfg->generic_sharing_context);
325 if (mini_type_var_is_vt (cfg, type))
330 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
336 mono_print_bb (MonoBasicBlock *bb, const char *msg)
341 printf ("\n%s %d: [IN: ", msg, bb->block_num);
342 for (i = 0; i < bb->in_count; ++i)
343 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
345 for (i = 0; i < bb->out_count; ++i)
346 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
348 for (tree = bb->code; tree; tree = tree->next)
349 mono_print_ins_index (-1, tree);
353 mono_create_helper_signatures (void)
355 helper_sig_domain_get = mono_create_icall_signature ("ptr");
356 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
357 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
358 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
359 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
360 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
361 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
365 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
366 * foo<T> (int i) { ldarg.0; box T; }
368 #define UNVERIFIED do { \
369 if (cfg->gsharedvt) { \
370 if (cfg->verbose_level > 2) \
371 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
372 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
373 goto exception_exit; \
375 if (mini_get_debug_options ()->break_on_unverified) \
/* On a load failure: break into the debugger when requested, otherwise bail out to load_error. */
#define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)

/* Same as LOAD_ERROR, but also records the offending class in cfg->exception_ptr. */
#define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
385 #define GET_BBLOCK(cfg,tblock,ip) do { \
386 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
388 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
389 NEW_BBLOCK (cfg, (tblock)); \
390 (tblock)->cil_code = (ip); \
391 ADD_BBLOCK (cfg, (tblock)); \
#if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86/amd64 LEA computing sr1 + (sr2 << shift) + imm into a fresh managed-pointer reg. */
#define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
	MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
	(dest)->dreg = alloc_ireg_mp ((cfg)); \
	(dest)->sreg1 = (sr1); \
	(dest)->sreg2 = (sr2); \
	(dest)->inst_imm = (imm); \
	(dest)->backend.shift_amount = (shift); \
	MONO_ADD_INS ((cfg)->cbb, (dest)); \
	} while (0)
#endif
407 #if SIZEOF_REGISTER == 8
408 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
409 /* FIXME: Need to add many more cases */ \
410 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
412 int dr = alloc_preg (cfg); \
413 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
414 (ins)->sreg2 = widen->dreg; \
418 #define ADD_WIDEN_OP(ins, arg1, arg2)
421 #define ADD_BINOP(op) do { \
422 MONO_INST_NEW (cfg, ins, (op)); \
424 ins->sreg1 = sp [0]->dreg; \
425 ins->sreg2 = sp [1]->dreg; \
426 type_from_op (ins, sp [0], sp [1]); \
428 /* Have to insert a widening op */ \
429 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
430 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
431 MONO_ADD_INS ((cfg)->cbb, (ins)); \
432 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
435 #define ADD_UNOP(op) do { \
436 MONO_INST_NEW (cfg, ins, (op)); \
438 ins->sreg1 = sp [0]->dreg; \
439 type_from_op (ins, sp [0], NULL); \
441 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
442 MONO_ADD_INS ((cfg)->cbb, (ins)); \
443 *sp++ = mono_decompose_opcode (cfg, ins); \
446 #define ADD_BINCOND(next_block) do { \
449 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
450 cmp->sreg1 = sp [0]->dreg; \
451 cmp->sreg2 = sp [1]->dreg; \
452 type_from_op (cmp, sp [0], sp [1]); \
454 type_from_op (ins, sp [0], sp [1]); \
455 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
456 GET_BBLOCK (cfg, tblock, target); \
457 link_bblock (cfg, bblock, tblock); \
458 ins->inst_true_bb = tblock; \
459 if ((next_block)) { \
460 link_bblock (cfg, bblock, (next_block)); \
461 ins->inst_false_bb = (next_block); \
462 start_new_bblock = 1; \
464 GET_BBLOCK (cfg, tblock, ip); \
465 link_bblock (cfg, bblock, tblock); \
466 ins->inst_false_bb = tblock; \
467 start_new_bblock = 2; \
469 if (sp != stack_start) { \
470 handle_stack_args (cfg, stack_start, sp - stack_start); \
471 CHECK_UNVERIFIABLE (cfg); \
473 MONO_ADD_INS (bblock, cmp); \
474 MONO_ADD_INS (bblock, ins); \
478 * link_bblock: Links two basic blocks
480 * links two basic blocks in the control flow graph, the 'from'
481 * argument is the starting block and the 'to' argument is the block
482 * the control flow ends to after 'from'.
485 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
487 MonoBasicBlock **newa;
491 if (from->cil_code) {
493 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
495 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
498 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
500 printf ("edge from entry to exit\n");
505 for (i = 0; i < from->out_count; ++i) {
506 if (to == from->out_bb [i]) {
512 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
513 for (i = 0; i < from->out_count; ++i) {
514 newa [i] = from->out_bb [i];
522 for (i = 0; i < to->in_count; ++i) {
523 if (from == to->in_bb [i]) {
529 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
530 for (i = 0; i < to->in_count; ++i) {
531 newa [i] = to->in_bb [i];
540 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 link_bblock (cfg, from, to);
546 * mono_find_block_region:
548 * We mark each basic block with a region ID. We use that to avoid BB
549 * optimizations when blocks are in different regions.
552 * A region token that encodes where this region is, and information
553 * about the clause owner for this block.
555 * The region encodes the try/catch/filter clause that owns this block
556 * as well as the type. -1 is a special value that represents a block
557 * that is in none of try/catch/filter.
560 mono_find_block_region (MonoCompile *cfg, int offset)
562 MonoMethodHeader *header = cfg->header;
563 MonoExceptionClause *clause;
566 for (i = 0; i < header->num_clauses; ++i) {
567 clause = &header->clauses [i];
568 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
569 (offset < (clause->handler_offset)))
570 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
572 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
573 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
574 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
575 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
576 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
578 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
581 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
582 return ((i + 1) << 8) | clause->flags;
589 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
591 MonoMethodHeader *header = cfg->header;
592 MonoExceptionClause *clause;
596 for (i = 0; i < header->num_clauses; ++i) {
597 clause = &header->clauses [i];
598 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
599 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
600 if (clause->flags == type)
601 res = g_list_append (res, clause);
608 mono_create_spvar_for_region (MonoCompile *cfg, int region)
612 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
616 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
617 /* prevent it from being register allocated */
618 var->flags |= MONO_INST_VOLATILE;
620 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
624 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
626 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
630 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
634 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
638 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
639 /* prevent it from being register allocated */
640 var->flags |= MONO_INST_VOLATILE;
642 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
656 type = mini_replace_type (type);
657 inst->klass = klass = mono_class_from_mono_type (type);
659 inst->type = STACK_MP;
664 switch (type->type) {
666 inst->type = STACK_INV;
670 case MONO_TYPE_BOOLEAN:
676 inst->type = STACK_I4;
681 case MONO_TYPE_FNPTR:
682 inst->type = STACK_PTR;
684 case MONO_TYPE_CLASS:
685 case MONO_TYPE_STRING:
686 case MONO_TYPE_OBJECT:
687 case MONO_TYPE_SZARRAY:
688 case MONO_TYPE_ARRAY:
689 inst->type = STACK_OBJ;
693 inst->type = STACK_I8;
697 inst->type = STACK_R8;
699 case MONO_TYPE_VALUETYPE:
700 if (type->data.klass->enumtype) {
701 type = mono_class_enum_basetype (type->data.klass);
705 inst->type = STACK_VTYPE;
708 case MONO_TYPE_TYPEDBYREF:
709 inst->klass = mono_defaults.typed_reference_class;
710 inst->type = STACK_VTYPE;
712 case MONO_TYPE_GENERICINST:
713 type = &type->data.generic_class->container_class->byval_arg;
717 g_assert (cfg->generic_sharing_context);
718 if (mini_is_gsharedvt_type (cfg, type)) {
719 g_assert (cfg->gsharedvt);
720 inst->type = STACK_VTYPE;
722 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_ICONV_TO_U;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1169 return cfg->rgctx_var;
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to monotype not handled\n", ins->type);
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 t = mono_type_get_underlying_type (t);
1196 case MONO_TYPE_BOOLEAN:
1199 case MONO_TYPE_CHAR:
1206 case MONO_TYPE_FNPTR:
1208 case MONO_TYPE_CLASS:
1209 case MONO_TYPE_STRING:
1210 case MONO_TYPE_OBJECT:
1211 case MONO_TYPE_SZARRAY:
1212 case MONO_TYPE_ARRAY:
1220 case MONO_TYPE_VALUETYPE:
1221 case MONO_TYPE_TYPEDBYREF:
1223 case MONO_TYPE_GENERICINST:
1224 if (mono_type_generic_inst_is_valuetype (t))
1230 g_assert_not_reached ();
1237 array_access_to_klass (int opcode)
1241 return mono_defaults.byte_class;
1243 return mono_defaults.uint16_class;
1246 return mono_defaults.int_class;
1249 return mono_defaults.sbyte_class;
1252 return mono_defaults.int16_class;
1255 return mono_defaults.int32_class;
1257 return mono_defaults.uint32_class;
1260 return mono_defaults.int64_class;
1263 return mono_defaults.single_class;
1266 return mono_defaults.double_class;
1267 case CEE_LDELEM_REF:
1268 case CEE_STELEM_REF:
1269 return mono_defaults.object_class;
1271 g_assert_not_reached ();
1277 * We try to share variables when possible
1280 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1285 /* inlining can result in deeper stacks */
1286 if (slot >= cfg->header->max_stack)
1287 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1289 pos = ins->type - 1 + slot * STACK_MAX;
1291 switch (ins->type) {
1298 if ((vnum = cfg->intvars [pos]))
1299 return cfg->varinfo [vnum];
1300 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1301 cfg->intvars [pos] = res->inst_c0;
1304 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1310 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1313 * Don't use this if a generic_context is set, since that means AOT can't
1314 * look up the method using just the image+token.
1315 * table == 0 means this is a reference made from a wrapper.
1317 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1318 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1319 jump_info_token->image = image;
1320 jump_info_token->token = token;
1321 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1326 * This function is called to handle items that are left on the evaluation stack
1327 * at basic block boundaries. What happens is that we save the values to local variables
1328 * and we reload them later when first entering the target basic block (with the
1329 * handle_loaded_temps () function).
1330 * A single joint point will use the same variables (stored in the array bb->out_stack or
1331 * bb->in_stack, if the basic block is before or after the joint point).
1333 * This function needs to be called _before_ emitting the last instruction of
1334 * the bb (i.e. before emitting a branch).
1335 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *
 * Spill the COUNT values in SP (the evaluation stack on exit from cfg->cbb)
 * into local variables shared with the successor basic blocks, so they can be
 * reloaded on entry to those blocks. Sets cfg->unverifiable when stack depths
 * disagree at a join point.
 * NOTE(review): several structural lines (continue statements, closing braces,
 * else keywords) are not visible in this view; comments describe only what is
 * shown.
 */
handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
	MonoBasicBlock *bb = cfg->cbb;
	MonoBasicBlock *outb;
	MonoInst *inst, **locals;

	if (cfg->verbose_level > 3)
		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);

	if (!bb->out_scount) {
		/* First time this bblock is processed: decide which variables to use. */
		bb->out_scount = count;
		//printf ("bblock %d has out:", bb->block_num);
		for (i = 0; i < bb->out_count; ++i) {
			outb = bb->out_bb [i];
			/* exception handlers are linked, but they should not be considered for stack args */
			if (outb->flags & BB_EXCEPTION_HANDLER)
			//printf (" %d", outb->block_num);
			if (outb->in_stack) {
				/* Reuse the variables a successor has already chosen for its in-stack. */
				bb->out_stack = outb->in_stack;
		/* No successor had an in-stack yet: allocate fresh out-stack variables. */
		bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
		for (i = 0; i < count; ++i) {
			/*
			 * try to reuse temps already allocated for this purpose, if they occupy the same
			 * stack slot and if they are of the same type.
			 * This won't cause conflicts since if 'local' is used to
			 * store one of the values in the in_stack of a bblock, then
			 * the same variable will be used for the same outgoing stack
			 * This doesn't work when inlining methods, since the bblocks
			 * in the inlined methods do not inherit their in_stack from
			 * the bblock they are inlined to. See bug #58863 for an
			 */
			if (cfg->inlined_method)
				bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
				bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);

	/* Propagate the chosen variables into the successors' in-stacks. */
	for (i = 0; i < bb->out_count; ++i) {
		outb = bb->out_bb [i];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER)
		if (outb->in_scount) {
			if (outb->in_scount != bb->out_scount) {
				/* Depth mismatch at a join point: method is not verifiable. */
				cfg->unverifiable = TRUE;
			continue; /* check they are the same locals */
		outb->in_scount = count;
		outb->in_stack = bb->out_stack;

	locals = bb->out_stack;
	/* Store each stack value into its assigned temporary and replace it on sp. */
	for (i = 0; i < count; ++i) {
		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
		inst->cil_code = sp [i]->cil_code;
		sp [i] = locals [i];
		if (cfg->verbose_level > 3)
			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);

	/*
	 * It is possible that the out bblocks already have in_stack assigned, and
	 * the in_stacks differ. In this case, we will store to all the different
	 */
	/* Find a bblock which has a different in_stack */
	while (bindex < bb->out_count) {
		outb = bb->out_bb [bindex];
		/* exception handlers are linked, but they should not be considered for stack args */
		if (outb->flags & BB_EXCEPTION_HANDLER) {
		if (outb->in_stack != locals) {
			/* Also store the values into this successor's distinct in-stack vars. */
			for (i = 0; i < count; ++i) {
				EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
				inst->cil_code = sp [i]->cil_code;
				sp [i] = locals [i];
				if (cfg->verbose_level > 3)
					printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
			locals = outb->in_stack;
1451 /* Emit code which loads interface_offsets [klass->interface_id]
1452 * The array is stored in memory before vtable.
1455 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1457 if (cfg->compile_aot) {
1458 int ioffset_reg = alloc_preg (cfg);
1459 int iid_reg = alloc_preg (cfg);
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1462 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 * Emit code which loads into INTF_BIT_REG a nonzero value iff the bit for
 * KLASS's interface id is set in the interface bitmap found at
 * BASE_REG + OFFSET. Uses a helper icall when the bitmap is compressed,
 * otherwise an inline byte load + mask.
 * NOTE(review): some structural lines (else keywords, closing braces and the
 * declaration of 'args') are not visible in this view.
 */
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	/* The interface id argument: an AOT patch under AOT, an immediate otherwise. */
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	/* Delegate the compressed-bitmap lookup to a runtime helper. */
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
	int ibitmap_byte_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);

	if (cfg->compile_aot) {
		/*
		 * AOT: compute bitmap [iid >> 3] & (1 << (iid & 7)) with explicit IR,
		 * since the interface id is not known until load time.
		 */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
	/* JIT: the id is a constant, so byte index and mask fold to immediates. */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1514 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1515 * stored in "klass_reg" implements the interface "klass".
1518 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1520 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1524 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1525 * stored in "vtable_reg" implements the interface "klass".
1528 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1530 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
 * Emit code which checks whether the interface id of @klass is smaller than
 * the value given by max_iid_reg.
1538 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1539 MonoBasicBlock *false_target)
1541 if (cfg->compile_aot) {
1542 int iid_reg = alloc_preg (cfg);
1543 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1544 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1551 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1554 /* Same as above, but obtains max_iid from a vtable */
1556 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1557 MonoBasicBlock *false_target)
1559 int max_iid_reg = alloc_preg (cfg);
1561 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1562 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1565 /* Same as above, but obtains max_iid from a klass */
1567 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1568 MonoBasicBlock *false_target)
1570 int max_iid_reg = alloc_preg (cfg);
1572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1573 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 * Emit an "is instance" subtype test: probe the supertypes table of the
 * MonoClass in KLASS_REG at KLASS's inheritance depth and branch to
 * TRUE_TARGET on a match. FALSE_TARGET receives the idepth fast-path failure.
 * KLASS_INS, when non-NULL, supplies the class to compare against as a runtime
 * value instead of a constant.
 * NOTE(review): some structural lines (closing braces, else keywords) are not
 * visible in this view.
 */
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);

	mono_class_setup_supertypes (klass);

	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		/*
		 * Deep hierarchy: verify the candidate's idepth is large enough before
		 * indexing its supertypes array.
		 */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	/* Load supertypes [klass->idepth - 1] for the comparison. */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		/* Runtime class value supplied by the caller. */
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1605 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1607 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1611 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 int intf_reg = alloc_preg (cfg);
1615 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1616 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1621 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1625 * Variant of the above that takes a register to the class, not the vtable.
1628 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1630 int intf_bit_reg = alloc_preg (cfg);
1632 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1633 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1638 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1642 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1646 } else if (cfg->compile_aot) {
1647 int const_reg = alloc_preg (cfg);
1648 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1653 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1657 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1659 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1663 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1665 if (cfg->compile_aot) {
1666 int const_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1668 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1676 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 * Emit a castclass check of the MonoClass in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure. OBJ_REG holds the object being cast (or -1
 * to skip the SZARRAY bounds check); OBJECT_IS_NULL is a branch target for
 * values that trivially pass. KLASS_INST optionally supplies the class as a
 * runtime value.
 * NOTE(review): the branch structure around the array/non-array cases is only
 * partially visible in this view (guarding conditions and braces elided).
 */
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
		/* Array case: the ranks must match exactly. */
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);

		g_assert (!klass_inst);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		if (klass->cast_class == mono_defaults.object_class) {
			/* Element type 'object': enum-related classes need special casing. */
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			/* Interface element type: do a full interface check on the element class. */
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);

		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

		/*
		 * Non-array case: probe the supertypes table, as in
		 * mini_emit_isninst_cast_inst (), but throwing on failure.
		 */
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);

		mono_class_setup_supertypes (klass);

		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1734 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1736 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 * Emit inline IR which stores VAL (must be 0) into SIZE bytes at
 * DESTREG + OFFSET, using the widest stores the given ALIGN permits.
 * NOTE(review): the switch/loop structure around the per-width stores is only
 * partially visible in this view.
 */
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
	/* Only zero-filling is supported by this helper. */
	g_assert (val == 0);

	if ((size <= 4) && (size <= align)) {
		/* Small, sufficiently aligned block: a single store-immediate suffices. */
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
#if SIZEOF_REGISTER == 8
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);

	/* General case: materialize the value in a register and store repeatedly. */
	val_reg = alloc_preg (cfg);

	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);

	/* This could be optimized further if necessary */
		/* Byte-wise fallback store. */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
			/* Peel a 4-byte store, presumably to fix up alignment — guard not visible. */
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			/* 8-byte stores while enough bytes remain. */
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);

		/* Remaining tail: 4-, 2-, then 1-byte stores. */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 * Emit inline IR which copies SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET, using the widest load/store pairs ALIGN permits.
 * NOTE(review): the loop structure around the per-width copies is only
 * partially visible in this view.
 */
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);

	/* This could be optimized further if necessary */
		/* Byte-wise fallback copy. */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);

#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* 8-byte copies while enough bytes remain. */
			cur_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);

	/* Remaining tail: 4-, 2-, then 1-byte copies. */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 * Emit IR which stores SREG1 into the TLS slot identified by TLS_KEY. Under
 * AOT the TLS offset becomes a patchable constant (OP_TLS_SET_REG), otherwise
 * it is resolved immediately via mini_get_tls_offset () (OP_TLS_SET).
 * NOTE(review): the declarations of 'ins'/'c', the sreg1 assignments and the
 * else keyword are not visible in this view.
 */
emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
	if (cfg->compile_aot) {
		EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
		MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
		ins->sreg2 = c->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
		MONO_INST_NEW (cfg, ins, OP_TLS_SET);
		ins->inst_offset = mini_get_tls_offset (tls_key);
		MONO_ADD_INS (cfg->cbb, ins);
1901 * Emit IR to push the current LMF onto the LMF stack.
emit_push_lmf (MonoCompile *cfg)
	/*
	 * Emit IR to push the LMF:
	 * lmf_addr = <lmf_addr from tls>
	 * lmf->lmf_addr = lmf_addr
	 * lmf->prev_lmf = *lmf_addr
	 */
	int lmf_reg, prev_lmf_reg;
	MonoInst *ins, *lmf_ins;

	/* NOTE(review): else branches/closing braces are elided in this view. */
	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
		/* Fast path: the whole MonoLMF lives in a TLS slot. */
		/* Load current lmf */
		lmf_ins = mono_get_lmf_intrinsic (cfg);
		MONO_ADD_INS (cfg->cbb, lmf_ins);
		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
		lmf_reg = ins->dreg;
		/* Save previous_lmf */
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
		/* Publish the new LMF into the TLS slot. */
		emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
		/*
		 * Store lmf_addr in a variable, so it can be allocated to a global register.
		 */
		if (!cfg->lmf_addr_var)
			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
			/* Preferred: TLS intrinsic for the lmf address. */
			lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
			MONO_ADD_INS (cfg->cbb, lmf_ins);
			/* Fallback: fetch the lmf address through an icall. */
			lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
		lmf_ins->dreg = cfg->lmf_addr_var->dreg;
		EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
		lmf_reg = ins->dreg;

		prev_lmf_reg = alloc_preg (cfg);
		/* Save previous_lmf */
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
		/* Make the new LMF the head of the LMF stack (*lmf_addr = lmf). */
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1959 * Emit IR to pop the current LMF from the LMF stack.
emit_pop_lmf (MonoCompile *cfg)
	int lmf_reg, lmf_addr_reg, prev_lmf_reg;

	/* NOTE(review): the declaration of 'ins' and some braces are elided here. */
	EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
	lmf_reg = ins->dreg;

	if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
		/* Fast path: restore the previous LMF directly into the TLS slot. */
		/* Load previous_lmf */
		prev_lmf_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
		/*
		 * Emit IR to pop the LMF:
		 * *(lmf->lmf_addr) = lmf->prev_lmf
		 */
		/* This could be called before emit_push_lmf () */
		if (!cfg->lmf_addr_var)
			cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		lmf_addr_reg = cfg->lmf_addr_var->dreg;

		prev_lmf_reg = alloc_preg (cfg);
		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *
 * Select the call IR opcode family (CALL / VOIDCALL / LCALL / FCALL / VCALL)
 * appropriate for return type TYPE, picking the _REG variant for indirect
 * calls (CALLI) and the _MEMBASE variant for virtual calls (VIRT).
 * NOTE(review): many case labels and the enum/byref handling lines are not
 * visible in this view.
 */
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
	/* NOTE(review): presumably the byref early-out — its guarding 'if' is not visible. */
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;

	type = mini_get_basic_type_from_generic (gsctx, type);
	type = mini_replace_type (type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		/* Small integer types: plain CALL. */
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		/* Reference types: plain CALL. */
		return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
		/* 64-bit integers: LCALL. */
		return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
		/* Floating point: FCALL. */
		return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* Enums are handled as their underlying type. */
			type = mono_class_enum_basetype (type->data.klass);
		/* Non-enum valuetypes: VCALL. */
		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* Retry with the generic container class. */
		type = &type->data.generic_class->container_class->byval_arg;
	case MONO_TYPE_MVAR:
		/* gsharedvt type variables are treated as vtypes. */
		return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2055 * target_type_is_incompatible:
2056 * @cfg: MonoCompile context
2058 * Check that the item @arg on the evaluation stack can be stored
2059 * in the target type (can be a local, or field, etc).
2060 * The cfg arg can be used to check if we need verification or just
2063 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 *
 * Check that the evaluation-stack item ARG can be stored into TARGET (a
 * local, field, etc). Returns nonzero when it cannot. CFG can be used to
 * decide whether strict verification is needed.
 * NOTE(review): the return statements between many case labels and some case
 * labels themselves are not visible in this view.
 */
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
	MonoType *simple_type;

	target = mini_replace_type (target);
	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)

	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		/* 32-bit integer targets accept I4 and native-int stack values. */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
	case MONO_TYPE_FNPTR:
		/*
		 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
		 * in native int. (#688008).
		 */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
		/* FIXME: check type compatibility */
		if (arg->type != STACK_I8)
		if (arg->type != STACK_R8)
	case MONO_TYPE_VALUETYPE:
		/* Valuetype targets additionally require the exact same class. */
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_GENERICINST:
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
			/* Reference generic instantiations behave like object targets. */
			if (arg->type != STACK_OBJ)
			/* FIXME: check type compatibility */
	case MONO_TYPE_MVAR:
		/* Type variables only appear here under generic sharing. */
		g_assert (cfg->generic_sharing_context);
		if (mini_type_var_is_vt (cfg, simple_type)) {
			if (arg->type != STACK_VTYPE)
			if (arg->type != STACK_OBJ)
		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2176 * Prepare arguments for passing to a function call.
2177 * Return a non-zero value if the arguments can't be passed to the given
2179 * The type checks are not yet complete and some conversions may need
2180 * casts on 32 or 64 bit architectures.
2182 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 *
 * Check that the evaluation-stack values in ARGS can be passed to a call with
 * signature SIG; returns nonzero when they cannot. The 'this' argument (when
 * present) must be an object, managed pointer or native pointer.
 * NOTE(review): return statements between many case labels, the loop
 * variable declaration and several case labels are not visible in this view.
 */
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
	MonoType *simple_type;

	/* Presumably guarded by sig->hasthis — the guard is not visible here. */
	if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)

	for (i = 0; i < sig->param_count; ++i) {
		if (sig->params [i]->byref) {
			/* Byref parameters need a managed or native pointer. */
			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
		simple_type = sig->params [i];
		simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);

		switch (simple_type->type) {
		case MONO_TYPE_VOID:
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_CHAR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
		case MONO_TYPE_FNPTR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
		case MONO_TYPE_CLASS:
		case MONO_TYPE_STRING:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			if (args [i]->type != STACK_OBJ)
			if (args [i]->type != STACK_I8)
			if (args [i]->type != STACK_R8)
		case MONO_TYPE_VALUETYPE:
			if (simple_type->data.klass->enumtype) {
				/* Enums are re-checked as their underlying type. */
				simple_type = mono_class_enum_basetype (simple_type->data.klass);
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_TYPEDBYREF:
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_GENERICINST:
			/* Retry with the generic container class. */
			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
		case MONO_TYPE_MVAR:
			/* gsharedvt type variables are passed as vtypes. */
			if (args [i]->type != STACK_VTYPE)
			g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Map a *CALL_MEMBASE opcode to the corresponding direct-call opcode.
 * NOTE(review): the return statements between the case labels are not visible
 * in this view.
 */
callvirt_to_call (int opcode)
	case OP_CALL_MEMBASE:
	case OP_VOIDCALL_MEMBASE:
	case OP_FCALL_MEMBASE:
	case OP_VCALL_MEMBASE:
	case OP_LCALL_MEMBASE:
	/* Any other opcode is a caller bug. */
	g_assert_not_reached ();
2294 #ifdef MONO_ARCH_HAVE_IMT
2295 /* Either METHOD or IMT_ARG needs to be set */
/* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 * Emit IR which makes the IMT identifier (either the constant METHOD or the
 * runtime value IMT_ARG) available to CALL: through a plain vreg for LLVM,
 * through MONO_ARCH_IMT_REG when the architecture defines one, or via
 * mono_arch_emit_imt_argument () otherwise.
 * NOTE(review): some guarding conditions, else keywords and #else/#endif
 * lines are not visible in this view.
 */
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
	if (COMPILE_LLVM (cfg)) {
		/* LLVM path: pass the identifier in an ordinary vreg recorded on the call. */
		method_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);

		call->imt_arg_reg = method_reg;

#ifdef MONO_ARCH_IMT_REG
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
		/* Need this to keep the IMT arg alive */
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);

#ifdef MONO_ARCH_IMT_REG
	/* Non-LLVM path: same identifier setup, passed in the dedicated IMT register. */
	method_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);

	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
	/* Fallback when no IMT register exists: let the backend decide. */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 * Allocate from MP a MonoJumpInfo describing a patch of kind TYPE at IP
 * pointing at TARGET.
 * NOTE(review): the assignments of the ip/type fields and the return are not
 * visible in this view.
 */
static MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
	MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));

	ji->data.target = target;
2363 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2365 if (cfg->generic_sharing_context)
2366 return mono_class_check_context_used (klass);
2372 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2374 if (cfg->generic_sharing_context)
2375 return mono_method_check_context_used (method);
2381 * check_method_sharing:
 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * check_method_sharing:
 *
 * Decide whether a vtable or an mrgctx argument is needed to call CMETHOD and
 * report the result through *OUT_PASS_VTABLE / *OUT_PASS_MRGCTX (either may be
 * NULL).
 * NOTE(review): several assignments (e.g. pass_vtable/pass_mrgctx = TRUE) and
 * closing braces are not visible in this view.
 */
check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
	gboolean pass_vtable = FALSE;
	gboolean pass_mrgctx = FALSE;

	if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
		(cmethod->klass->generic_class || cmethod->klass->generic_container)) {
		gboolean sharable = FALSE;

		if (mono_method_is_generic_sharable (cmethod, TRUE)) {
			/* Fallback: derive sharability from class/context sharing settings. */
			gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
			MonoGenericContext *context = mini_class_get_context (cmethod->klass);
			gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);

			sharable = sharing_enabled && context_sharable;

		/*
		 * Pass vtable iff target method might
		 * be shared, which means that sharing
		 * is enabled for its class and its
		 * context is sharable (and it's not a
		 */
		if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))

	if (mini_method_get_context (cmethod) &&
		mini_method_get_context (cmethod)->method_inst) {
		/* Generic methods take an mrgctx instead of a vtable. */
		g_assert (!pass_vtable);

		if (mono_method_is_generic_sharable (cmethod, TRUE)) {
			gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
			MonoGenericContext *context = mini_method_get_context (cmethod);
			gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);

			if (sharing_enabled && context_sharable)
		if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))

	if (out_pass_vtable)
		*out_pass_vtable = pass_vtable;
	if (out_pass_mrgctx)
		*out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 * Create a MonoCallInst for a call with signature SIG and arguments ARGS and
 * let the backend lower the argument passing. CALLI selects an indirect call,
 * VIRTUAL a vtable call, TAIL a tail call; RGCTX marks an rgctx call and
 * UNBOX_TRAMPOLINE requests an unbox trampoline for valuetype methods.
 * NOTE(review): several declarations, guarding conditions, else keywords and
 * the return statement are not visible in this view.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
		/* Tail calls get a dedicated opcode. */
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));

	call->signature = sig;
	call->rgctx_reg = rgctx;
	sig_ret = mini_replace_type (sig->ret);

	type_to_eval_stack_type ((cfg), sig_ret, &call->inst);

	if (mini_type_is_vtype (cfg, sig_ret)) {
		/* Presumably the tail-call branch: reuse the caller's vret_addr. */
		call->vret_var = cfg->vret_addr;
		//g_assert_not_reached ();
	} else if (mini_type_is_vtype (cfg, sig_ret)) {
		/* Vtype return: allocate a temporary to receive the value. */
		MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);

		temp->backend.is_pinvoke = sig->pinvoke;

		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);

		call->inst.dreg = temp->dreg;

		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig_ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);

#ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoInst *in = call->args [i];

			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
				/* The implicit 'this' argument is a native int. */
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);

			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];

				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);

				/* The result will be in an int vreg */
				call->args [i] = conv;

	call->need_unbox_trampoline = unbox_trampoline;

	/* Let the backend (or LLVM) lower the argument passing. */
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
		mono_arch_emit_call (cfg, call);
	mono_arch_emit_call (cfg, call);

	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the runtime-generic-context argument (in RGCTX_REG) to CALL.
 *   On architectures with a dedicated RGCTX register it is passed there;
 *   the (elided) fallback path records it in call->rgctx_arg_reg instead.
 */
2543 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2545 #ifdef MONO_ARCH_RGCTX_REG
2546 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2547 cfg->uses_rgctx_reg = TRUE;
2548 call->rgctx_reg = TRUE;
2550 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG, optionally passing
 *   an IMT argument and/or an rgctx argument.  Returns the call instruction.
 */
2557 inline static MonoInst*
2558 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
/* Copy the rgctx value into a fresh vreg so it survives until the call. */
2564 rgctx_reg = mono_alloc_preg (cfg);
2565 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2568 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The call target address lives in sreg1 for indirect calls. */
2570 call->inst.sreg1 = addr->dreg;
2573 emit_imt_argument (cfg, call, NULL, imt_arg);
2575 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2578 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2580 return (MonoInst*)call;
2584 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2587 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2589 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a (possibly virtual / tail / remoting / shared-generic) call to
 *   METHOD.  THIS non-NULL selects virtual dispatch; IMT_ARG / RGCTX_ARG are
 *   optional extra hidden arguments.  Returns the call instruction.
 *   NOTE(review): line-sampled excerpt — local declarations and several
 *   braces/else-arms are elided from view.
 */
2592 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2593 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2595 #ifndef DISABLE_REMOTING
2596 gboolean might_be_remote = FALSE;
2598 gboolean virtual = this != NULL;
2599 gboolean enable_for_aot = TRUE;
2603 gboolean need_unbox_trampoline;
2606 sig = mono_method_signature (method);
/* Preserve the rgctx value across the argument setup. */
2609 rgctx_reg = mono_alloc_preg (cfg);
2610 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2613 if (method->string_ctor) {
2614 /* Create the real signature */
2615 /* FIXME: Cache these */
/* String ctors actually return the string, not void. */
2616 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2617 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2622 context_used = mini_method_check_context_used (cfg, method);
2624 #ifndef DISABLE_REMOTING
/* A non-virtual call on a MarshalByRef (or object) 'this' may cross a remoting boundary. */
2625 might_be_remote = this && sig->hasthis &&
2626 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2627 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2629 if (might_be_remote && context_used) {
2632 g_assert (cfg->generic_sharing_context);
/* Shared generic code: fetch the remoting-invoke wrapper address from the rgctx and call indirectly. */
2634 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2636 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Valuetype methods reached through object/interface need the unbox trampoline. */
2640 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2642 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2644 #ifndef DISABLE_REMOTING
2645 if (might_be_remote)
2646 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2649 call->method = method;
2650 call->inst.flags |= MONO_INST_HAS_METHOD;
2651 call->inst.inst_left = this;
2652 call->tail_call = tail;
2655 int vtable_reg, slot_reg, this_reg;
2658 this_reg = this->dreg;
/* Fast path: delegate Invoke goes through delegate->invoke_impl directly. */
2660 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2661 MonoInst *dummy_use;
2663 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2665 /* Make a call to delegate->invoke_impl */
2666 call->inst.inst_basereg = this_reg;
2667 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2668 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2670 /* We must emit a dummy use here because the delegate trampoline will
2671 replace the 'this' argument with the delegate target making this activation
2672 no longer a root for the delegate.
2673 This is an issue for delegates that target collectible code such as dynamic
2674 methods of GC'able assemblies.
2676 For a test case look into #667921.
2678 FIXME: a dummy use is not the best way to do it as the local register allocator
2679 will put it on a caller save register and spil it around the call.
2680 Ideally, we would either put it on a callee save register or only do the store part.
2682 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2684 return (MonoInst*)call;
/* Devirtualize: non-virtual, or final method/class not needing a remoting wrapper. */
2687 if ((!cfg->compile_aot || enable_for_aot) &&
2688 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2689 (MONO_METHOD_IS_FINAL (method) &&
2690 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2691 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2693 * the method is not virtual, we just need to ensure this is not null
2694 * and then we can call the method directly.
2696 #ifndef DISABLE_REMOTING
2697 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2699 * The check above ensures method is not gshared, this is needed since
2700 * gshared methods can't have wrappers.
2702 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2706 if (!method->string_ctor)
2707 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2709 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2710 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2712 * the method is virtual, but we can statically dispatch since either
2713 * it's class or the method itself are sealed.
2714 * But first we need to ensure it's not a null reference.
2716 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2718 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (with an implicit null check). */
2720 vtable_reg = alloc_preg (cfg);
2721 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2722 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2724 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: the slot sits at a negative offset from the vtable. */
2726 guint32 imt_slot = mono_method_get_imt_slot (method);
2727 emit_imt_argument (cfg, call, call->method, imt_arg);
2728 slot_reg = vtable_reg;
2729 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT available: load the interface's vtable section explicitly. */
2732 if (slot_reg == -1) {
2733 slot_reg = alloc_preg (cfg);
2734 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2735 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Regular virtual call: index into the vtable's method table. */
2738 slot_reg = vtable_reg;
2739 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2740 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2741 #ifdef MONO_ARCH_HAVE_IMT
2743 g_assert (mono_method_signature (method)->generic_param_count);
2744 emit_imt_argument (cfg, call, call->method, imt_arg);
2749 call->inst.sreg1 = slot_reg;
2750 call->inst.inst_offset = offset;
2751 call->virtual = TRUE;
2755 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2758 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2760 return (MonoInst*)call;
/* Convenience wrapper: plain (non-tail) call to METHOD with its own signature. */
2764 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2766 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG.
 *   NOTE(review): the parameter list continues on elided lines.
 */
2770 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2777 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2780 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2782 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to a registered JIT icall identified by its address FUNC,
 *   going through the icall's wrapper.
 */
2786 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2788 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2792 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2796 * mono_emit_abs_call:
2798 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2800 inline static MonoInst*
2801 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2802 MonoMethodSignature *sig, MonoInst **args)
2804 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2808 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch so the ABS-address resolver can find it later. */
2811 if (cfg->abs_patches == NULL)
2812 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2813 g_hash_table_insert (cfg->abs_patches, ji, ji);
2814 ins = mono_emit_native_call (cfg, ji, sig, args);
2815 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Widen a small-integer call result to full register width, since native
 *   (pinvoke) or LLVM-compiled callees may leave the upper bits undefined.
 *   Returns the (possibly replaced) result instruction.
 */
2820 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2822 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2824 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2827 * Native code might return non register sized integers
2828 * without initializing the upper bits.
/* Pick the sign/zero-extension matching the declared return width. */
2830 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2831 case OP_LOADI1_MEMBASE:
2832 widen_op = OP_ICONV_TO_I1;
2834 case OP_LOADU1_MEMBASE:
2835 widen_op = OP_ICONV_TO_U1;
2837 case OP_LOADI2_MEMBASE:
2838 widen_op = OP_ICONV_TO_I2;
2840 case OP_LOADU2_MEMBASE:
2841 widen_op = OP_ICONV_TO_U2;
2847 if (widen_op != -1) {
2848 int dreg = alloc_preg (cfg);
2851 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2852 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Lazily resolve and cache corlib's String.memcpy(dst, src, n) helper.
 *   Aborts if running against an old corlib that lacks it.
 */
2862 get_memcpy_method (void)
2864 static MonoMethod *memcpy_method = NULL;
2865 if (!memcpy_method) {
2866 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2868 g_error ("Old corlib found. Install a new one");
2870 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Recursively compute a bitmap (one bit per pointer-sized slot, relative to
 *   OFFSET) marking which fields of KLASS hold GC references, so a bulk copy
 *   can emit write barriers only for those slots.  Static fields are skipped;
 *   embedded valuetypes are recursed into.
 */
2874 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2876 MonoClassField *field;
2877 gpointer iter = NULL;
2879 while ((field = mono_class_get_fields (klass, &iter))) {
2882 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the object header; strip it. */
2884 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2885 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2886 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2887 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2889 MonoClass *field_class = mono_class_from_mono_type (field->type);
2890 if (field_class->has_references)
2891 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for storing VALUE through PTR.  Prefers an inline
 *   card-table mark (OP_CARD_TABLE_WBARRIER or explicit shift/and/store
 *   sequence) and falls back to calling the generic write-barrier method.
 *   No-op when write barriers are disabled for this compile.
 */
2897 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2899 int card_table_shift_bits;
2900 gpointer card_table_mask;
2902 MonoInst *dummy_use;
2903 int nursery_shift_bits;
2904 size_t nursery_size;
2905 gboolean has_card_table_wb = FALSE;
2907 if (!cfg->gen_write_barriers)
2910 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2912 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
/* Whether the backend provides a card-table barrier opcode depends on AOT mode. */
2914 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER_IN_AOT
2915 if (cfg->compile_aot)
2916 has_card_table_wb = TRUE;
2918 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2919 if (!cfg->compile_aot)
2920 has_card_table_wb = TRUE;
2923 if (has_card_table_wb && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
/* Single backend opcode does the whole card mark. */
2926 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2927 wbarrier->sreg1 = ptr->dreg;
2928 wbarrier->sreg2 = value->dreg;
2929 MONO_ADD_INS (cfg->cbb, wbarrier);
2930 } else if (card_table) {
/* Open-coded card mark: card = table[ptr >> shift (& mask)]; *card = 1. */
2931 int offset_reg = alloc_preg (cfg);
2932 int card_reg = alloc_preg (cfg);
2935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2936 if (card_table_mask)
2937 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2939 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2940 * IMM's larger than 32bits.
2942 if (cfg->compile_aot) {
2943 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2945 MONO_INST_NEW (cfg, ins, OP_PCONST);
2946 ins->inst_p0 = card_table;
2947 ins->dreg = card_reg;
2948 MONO_ADD_INS (cfg->cbb, ins);
2951 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2952 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the runtime's generic write barrier. */
2954 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2955 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the GC. */
2958 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *   Emit an inline, write-barrier-aware copy of a valuetype of KLASS.
 *   iargs[0]/iargs[1] hold dest/src addresses.  Small copies (<= 5 slots) are
 *   unrolled with per-slot barriers; larger ones call the
 *   mono_gc_wbarrier_value_copy_bitmap icall with a computed reference bitmap.
 *   Bails out (elided early returns) when alignment is below pointer size or
 *   the size exceeds what a 32-bit bitmap can describe.
 */
2962 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2964 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2965 unsigned need_wb = 0;
2970 /*types with references can't have alignment smaller than sizeof(void*) */
2971 if (align < SIZEOF_VOID_P)
2974 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2975 if (size > 32 * SIZEOF_VOID_P)
2978 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2980 /* We don't unroll more than 5 stores to avoid code bloat. */
2981 if (size > 5 * SIZEOF_VOID_P) {
2982 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a whole number of pointer slots. */
2983 size += (SIZEOF_VOID_P - 1);
2984 size &= ~(SIZEOF_VOID_P - 1);
2986 EMIT_NEW_ICONST (cfg, iargs [2], size);
2987 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2988 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2992 destreg = iargs [0]->dreg;
2993 srcreg = iargs [1]->dreg;
2996 dest_ptr_reg = alloc_preg (cfg);
2997 tmp_reg = alloc_preg (cfg);
3000 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop; barrier only for slots flagged in need_wb (test elided). */
3002 while (size >= SIZEOF_VOID_P) {
3003 MonoInst *load_inst;
3004 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3005 load_inst->dreg = tmp_reg;
3006 load_inst->inst_basereg = srcreg;
3007 load_inst->inst_offset = offset;
3008 MONO_ADD_INS (cfg->cbb, load_inst);
3010 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3013 emit_write_barrier (cfg, iargs [0], load_inst);
3015 offset += SIZEOF_VOID_P;
3016 size -= SIZEOF_VOID_P;
3019 /*tmp += sizeof (void*)*/
3020 if (size >= SIZEOF_VOID_P) {
3021 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3022 MONO_ADD_INS (cfg->cbb, iargs [0]);
3026 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-size tail in 4/2/1-byte chunks (size tests elided). */
3028 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3029 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3035 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3036 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3042 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3052 * Emit code to copy a valuetype of type @klass whose address is stored in
3053 * @src->dreg to memory whose address is stored at @dest->dreg.
3056 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3058 MonoInst *iargs [4];
3059 int context_used, n;
3061 MonoMethod *memcpy_method;
3062 MonoInst *size_ins = NULL;
3063 MonoInst *memcpy_ins = NULL;
3067 * This check breaks with spilled vars... need to handle it during verification anyway.
3068 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy helper are only known at runtime via the rgctx. */
3071 if (mini_is_gsharedvt_klass (cfg, klass)) {
3073 context_used = mini_class_check_context_used (cfg, klass);
3074 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3075 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3079 n = mono_class_native_size (klass, &align);
3081 n = mono_class_value_size (klass, &align);
3083 /* if native is true there should be no references in the struct */
3084 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3085 /* Avoid barriers when storing to the stack */
3086 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3087 (dest->opcode == OP_LDADDR))) {
3093 context_used = mini_class_check_context_used (cfg, klass);
3095 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3096 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3098 } else if (context_used) {
3099 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3101 if (cfg->compile_aot) {
3102 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3104 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3105 mono_class_compute_gc_descriptor (klass);
/* Barrier-aware copy icalls: gsharedvt variant when the size is dynamic. */
3110 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3112 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: small fixed-size copies are inlined ... */
3117 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3118 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3119 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* ... larger ones go through the corlib memcpy helper (indirect for gsharedvt). */
3124 iargs [2] = size_ins;
3126 EMIT_NEW_ICONST (cfg, iargs [2], n);
3128 memcpy_method = get_memcpy_method ();
3130 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3132 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Lazily resolve and cache corlib's String.memset(ptr, val, n) helper.
 *   Aborts if running against an old corlib that lacks it.
 */
3137 get_memset_method (void)
3139 static MonoMethod *memset_method = NULL;
3140 if (!memset_method) {
3141 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3143 g_error ("Old corlib found. Install a new one");
3145 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code zero-initializing a valuetype of KLASS at address DEST.
 *   gsharedvt classes use a runtime-provided size + bzero helper; otherwise
 *   small values are memset inline and larger ones call corlib's memset.
 */
3149 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3151 MonoInst *iargs [3];
3152 int n, context_used;
3154 MonoMethod *memset_method;
3155 MonoInst *size_ins = NULL;
3156 MonoInst *bzero_ins = NULL;
3157 static MonoMethod *bzero_method;
3159 /* FIXME: Optimize this for the case when dest is an LDADDR */
3161 mono_class_init (klass);
3162 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* Size and bzero routine are only known at runtime; fetch via the rgctx. */
3163 context_used = mini_class_check_context_used (cfg, klass);
3164 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3165 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3167 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3168 g_assert (bzero_method);
3170 iargs [1] = size_ins;
3171 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3175 n = mono_class_value_size (klass, &align);
/* Small values: inline memset is cheaper than a call. */
3177 if (n <= sizeof (gpointer) * 5) {
3178 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3181 memset_method = get_memset_method ();
3183 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3184 EMIT_NEW_ICONST (cfg, iargs [2], n);
3185 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR loading the runtime generic context for METHOD.  Depending on
 *   how the method is shared, the rgctx comes from the hidden mrgctx
 *   argument, the hidden vtable argument, or the 'this' object's vtable.
 *   NOTE(review): several return statements are on elided lines.
 */
3190 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3192 MonoInst *this = NULL;
3194 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types can reach the context through 'this'. */
3196 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3197 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3198 !method->klass->valuetype)
3199 EMIT_NEW_ARGLOAD (cfg, this, 0);
3201 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
/* Method-inst sharing: load the MonoMethodRuntimeGenericContext variable. */
3202 MonoInst *mrgctx_loc, *mrgctx_var;
3205 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3207 mrgctx_loc = mono_get_vtable_var (cfg);
3208 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3211 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
/* Static / valuetype methods: the hidden argument holds a vtable (or mrgctx). */
3212 MonoInst *vtable_loc, *vtable_var;
3216 vtable_loc = mono_get_vtable_var (cfg);
3217 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3219 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* Hidden arg is actually an mrgctx; extract its class_vtable field. */
3220 MonoInst *mrgctx_var = vtable_var;
3223 vtable_reg = alloc_preg (cfg);
3224 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3225 vtable_var->type = STACK_PTR;
/* Otherwise: load the vtable from the 'this' object. */
3233 vtable_reg = alloc_preg (cfg);
3234 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) a jump-info entry describing an rgctx slot fetch:
 *   which method's context, whether it is an mrgctx, and the patch
 *   (PATCH_TYPE/PATCH_DATA) plus the kind of info (INFO_TYPE) to fetch.
 */
3239 static MonoJumpInfoRgctxEntry *
3240 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3242 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3243 res->method = method;
3244 res->in_mrgctx = in_mrgctx;
3245 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3246 res->data->type = patch_type;
3247 res->data->data.target = patch_data;
3248 res->info_type = info_type;
/* Emit a call to the lazy rgctx-fetch trampoline for ENTRY, passing RGCTX. */
3253 static inline MonoInst*
3254 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3256 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR fetching RGCTX_TYPE info for KLASS from the runtime generic context. */
3260 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3261 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3263 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3264 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3266 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR fetching RGCTX_TYPE info for signature SIG from the rgctx. */
3270 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3271 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3273 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3274 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3276 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *   Emit IR fetching gsharedvt call info (SIG + CMETHOD pair) of kind
 *   RGCTX_TYPE from the runtime generic context.
 */
3280 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3281 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3283 MonoJumpInfoGSharedVtCall *call_info;
3284 MonoJumpInfoRgctxEntry *entry;
3287 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3288 call_info->sig = sig;
3289 call_info->method = cmethod;
3291 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3292 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3294 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR fetching the gsharedvt per-method INFO structure from the rgctx. */
3299 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3300 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3302 MonoJumpInfoRgctxEntry *entry;
3305 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3306 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3308 return emit_rgctx_fetch (cfg, rgctx, entry);
3312 * emit_get_rgctx_method:
3314 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3315 * normal constants, else emit a load from the rgctx.
3318 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3319 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3321 if (!context_used) {
/* No sharing: the value is a compile-time constant. */
3324 switch (rgctx_type) {
3325 case MONO_RGCTX_INFO_METHOD:
3326 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3328 case MONO_RGCTX_INFO_METHOD_RGCTX:
3329 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3332 g_assert_not_reached ();
/* Shared code: go through the lazy rgctx fetch. */
3335 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3336 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3338 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR fetching RGCTX_TYPE info for FIELD from the rgctx. */
3343 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3344 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3346 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3347 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3349 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *   Return the index of the (DATA, RGCTX_TYPE) entry in the method's
 *   gsharedvt info template, reusing an existing slot when possible
 *   (except for LOCAL_OFFSET entries, which are never shared) and growing
 *   the entries array geometrically when full.
 */
3353 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3355 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3356 MonoRuntimeGenericContextInfoTemplate *template;
/* Reuse an existing matching slot if there is one. */
3361 for (i = 0; i < info->num_entries; ++i) {
3362 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3364 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (double, starting at 16) when full. */
3368 if (info->num_entries == info->count_entries) {
3369 MonoRuntimeGenericContextInfoTemplate *new_entries;
3370 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3372 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3374 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3375 info->entries = new_entries;
3376 info->count_entries = new_count_entries;
3379 idx = info->num_entries;
3380 template = &info->entries [idx];
3381 template->info_type = rgctx_type;
3382 template->data = data;
3384 info->num_entries ++;
3390 * emit_get_gsharedvt_info:
3392 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3395 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3400 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3401 /* Load info->entries [idx] */
3402 dreg = alloc_preg (cfg);
3403 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: gsharedvt info lookup keyed by a class's byval type. */
3409 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3411 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3415 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *   Emit a call to the generic-class-init trampoline for KLASS, passing its
 *   vtable (looked up via the rgctx in shared code, or as a constant).
 */
3418 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3420 MonoInst *vtable_arg;
3424 context_used = mini_class_check_context_used (cfg, klass);
3427 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3428 klass, MONO_RGCTX_INFO_VTABLE);
3430 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3434 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a different trampoline signature. */
3437 if (COMPILE_LLVM (cfg))
3438 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3440 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3441 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated register when the backend has one. */
3442 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3443 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *   Emit a debugger sequence point at IL offset IP when sequence points are
 *   enabled and METHOD is the method being compiled (not an inlinee).
 */
3450 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3454 if (cfg->gen_seq_points && cfg->method == method) {
3455 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3457 ins->flags |= MONO_INST_NONEMPTY_STACK;
3458 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *   Debug aid (--debug=casts): before a cast, record the object's runtime
 *   class and the target KLASS into the JIT TLS area so a failing cast can
 *   report both types.  With NULL_CHECK, the store is skipped for null
 *   objects.  Updates *OUT_BBLOCK with the current bblock when requested.
 */
3463 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3465 if (mini_get_debug_options ()->better_cast_details) {
3466 int to_klass_reg = alloc_preg (cfg);
3467 int vtable_reg = alloc_preg (cfg);
3468 int klass_reg = alloc_preg (cfg);
3469 MonoBasicBlock *is_null_bb = NULL;
3473 NEW_BBLOCK (cfg, is_null_bb);
3475 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3476 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3479 tls_get = mono_get_jit_tls_intrinsic (cfg);
3481 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3485 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass -> jit_tls->class_cast_from; KLASS -> class_cast_to. */
3486 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3487 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3489 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3490 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3491 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3494 MONO_START_BB (cfg, is_null_bb);
3496 *out_bblock = cfg->cbb;
/* Clear the saved cast details after a successful cast (debug builds only). */
3502 reset_cast_details (MonoCompile *cfg)
3504 /* Reset the variables holding the cast details */
3505 if (mini_get_debug_options ()->better_cast_details) {
3506 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3508 MONO_ADD_INS (cfg->cbb, tls_get);
3509 /* It is enough to reset the from field */
3510 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3515 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *   Emit a runtime check that OBJ's exact type is ARRAY_CLASS, throwing
 *   ArrayTypeMismatchException otherwise.  Compares either the MonoClass
 *   (MONO_OPT_SHARED), the vtable fetched from the rgctx (shared generics),
 *   or a vtable constant (AOT vs JIT variants).
 */
3518 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3520 int vtable_reg = alloc_preg (cfg);
3523 context_used = mini_class_check_context_used (cfg, array_class);
3525 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Load obj->vtable with an implicit null check. */
3527 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3529 if (cfg->opt & MONO_OPT_SHARED) {
3530 int class_reg = alloc_preg (cfg);
3531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3532 if (cfg->compile_aot) {
3533 int klass_reg = alloc_preg (cfg);
3534 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3535 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3539 } else if (context_used) {
3540 MonoInst *vtable_ins;
3542 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3543 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3545 if (cfg->compile_aot) {
3549 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3551 vt_reg = alloc_preg (cfg);
3552 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3553 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3556 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3562 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3564 reset_cast_details (cfg);
3568 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3569 * generic code is generated.
3572 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3574 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3577 MonoInst *rgctx, *addr;
3579 /* FIXME: What if the class is shared? We might not
3580 have to get the address of the method from the
/* Shared code: call Nullable<T>.Unbox indirectly via an rgctx-provided address. */
3582 addr = emit_get_rgctx_method (cfg, context_used, method,
3583 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3585 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3587 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared: direct call, passing the vtable as rgctx arg when required. */
3589 gboolean pass_vtable, pass_mrgctx;
3590 MonoInst *rgctx_arg = NULL;
3592 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3593 g_assert (!pass_mrgctx);
3596 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3599 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3602 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 * Emit the unbox sequence for a (non-nullable) value type @klass applied to
 * the object on top of the stack (sp [0]). Verifies at runtime that the
 * object is not an array (rank must be 0) and that its element class matches
 * @klass's element class, throwing InvalidCastException otherwise; in shared
 * generic code the expected element class comes from the RGCTX. Returns an
 * ADD_IMM instruction computing the address of the unboxed payload
 * (object pointer + sizeof (MonoObject)).
 * NOTE(review): elided lines hide some declarations (obj_reg, add) and the
 * if/else structure between the shared and non-shared paths.
 */
3607 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3611 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3612 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3613 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3614 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3616 obj_reg = sp [0]->dreg;
/* Faulting load: also performs the implicit null check on the boxed object. */
3617 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3618 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3620 /* FIXME: generics */
3621 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a value type. */
3624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3625 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3631 MonoInst *element_class;
3633 /* This assertion is from the unboxcast insn */
3634 g_assert (klass->rank == 0);
3636 element_class = emit_get_rgctx_klass (cfg, context_used,
3637 klass->element_class, MONO_RGCTX_INFO_KLASS);
3639 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3640 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3642 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3643 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3644 reset_cast_details (cfg);
/* Result: address of the value payload, right after the MonoObject header. */
3647 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3648 MONO_ADD_INS (cfg->cbb, add);
3649 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 * Unbox for gsharedvt (generic-sharing-over-valuetypes) code, where the
 * concrete layout of @klass is unknown at compile time. Branches at runtime
 * on MONO_RGCTX_INFO_CLASS_BOX_TYPE: value 1 routes to is_ref_bb and 2 to
 * is_nullable_bb (presumably "reference type" and "nullable" — the info
 * item's semantics are defined elsewhere; confirm against mini-generic-sharing).
 * All paths store an address into addr_reg; the merged end block loads the
 * value from it and reports the final basic block through @out_cbb.
 * NOTE(review): extraction elided some lines (declarations, a castclass-unbox
 * guard around line 3672, closing braces), so the exact nesting is partly hidden.
 */
3656 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3658 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3659 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3663 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3669 args [1] = klass_inst;
/* Runtime type check performed by an icall since klass is only known then. */
3672 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3674 NEW_BBLOCK (cfg, is_ref_bb);
3675 NEW_BBLOCK (cfg, is_nullable_bb);
3676 NEW_BBLOCK (cfg, end_bb);
3677 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3679 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3684 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3685 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fallthrough (vtype) path: payload sits right after the MonoObject header. */
3689 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3690 MONO_ADD_INS (cfg->cbb, addr);
3692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3695 MONO_START_BB (cfg, is_ref_bb);
3697 /* Save the ref to a temporary */
3698 dreg = alloc_ireg (cfg);
3699 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3700 addr->dreg = addr_reg;
3701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3702 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3705 MONO_START_BB (cfg, is_nullable_bb);
3708 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3709 MonoInst *unbox_call;
3710 MonoMethodSignature *unbox_sig;
3713 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* Nullable<T>.Unbox cannot be constructed at JIT time for gsharedvt, so a
 * one-parameter signature (object -> T) is built by hand and called indirectly. */
3715 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3716 unbox_sig->ret = &klass->byval_arg;
3717 unbox_sig->param_count = 1;
3718 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3719 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3721 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3722 addr->dreg = addr_reg;
3725 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3728 MONO_START_BB (cfg, end_bb);
3731 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
/* Caller must continue emitting into the block current after the merge. */
3733 *out_cbb = cfg->cbb;
/*
 * handle_alloc:
 * Emit IR that allocates an instance of @klass, optionally for boxing
 * (@for_box). Picks between: the GC's managed allocator, mono_object_new
 * (appdomain-shared code), mono_object_new_specific, a corlib-specialized
 * AOT helper (mono_helper_newobj_mscorlib), or the allocation function
 * returned by mono_class_get_allocation_ftn (which may request the
 * instance size in words via pass_lw). With @context_used the klass/vtable
 * argument comes from the RGCTX.
 * NOTE(review): extraction elided branch delimiters and some declarations
 * (alloc_ftn, pass_lw), so the if/else nesting is partly hidden.
 */
3742 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3744 MonoInst *iargs [2];
3750 MonoInst *iargs [2];
3752 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3754 if (cfg->opt & MONO_OPT_SHARED)
3755 rgctx_info = MONO_RGCTX_INFO_KLASS;
3757 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3758 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3760 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code must pass the current domain explicitly. */
3761 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3763 alloc_ftn = mono_object_new;
3766 alloc_ftn = mono_object_new_specific;
3769 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3770 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3772 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3775 if (cfg->opt & MONO_OPT_SHARED) {
3776 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3777 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3779 alloc_ftn = mono_object_new;
3780 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3781 /* This happens often in argument checking code, eg. throw new FooException... */
3782 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3783 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3784 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3786 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3787 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: record a TypeLoadException on the cfg. */
3791 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3792 cfg->exception_ptr = klass;
3796 #ifndef MONO_CROSS_COMPILE
3797 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3800 if (managed_alloc) {
3801 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3802 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3804 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: allocator wants the instance size in pointer-sized words. */
3806 guint32 lw = vtable->klass->instance_size;
3807 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3808 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3809 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3812 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3816 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 * Emit IR boxing @val into an object of @klass. Three major paths:
 *  - Nullable<T>: call Nullable<T>.Box (), either indirectly through the
 *    RGCTX (shared generic code) or as a direct call with an optional
 *    vtable rgctx argument.
 *  - gsharedvt klass: runtime branch on MONO_RGCTX_INFO_CLASS_BOX_TYPE
 *    (1 and 2 route to is_ref_bb / is_nullable_bb respectively — see the
 *    matching branch in handle_unbox_gsharedvt) with a hand-built calli to
 *    Nullable<T>.Box for the nullable case.
 *  - plain value type: allocate via handle_alloc and store the value right
 *    after the MonoObject header.
 * @out_cbb receives the basic block that is current when boxing is done.
 * NOTE(review): extraction elided lines (early returns, closing braces),
 * so some control-flow joints are hidden.
 */
3823 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3825 MonoInst *alloc, *ins;
3827 *out_cbb = cfg->cbb;
3829 if (mono_class_is_nullable (klass)) {
3830 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3833 /* FIXME: What if the class is shared? We might not
3834 have to get the method address from the RGCTX. */
3835 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3836 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3837 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3839 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3841 gboolean pass_vtable, pass_mrgctx;
3842 MonoInst *rgctx_arg = NULL;
3844 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3845 g_assert (!pass_mrgctx);
3848 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3851 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3854 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3858 if (mini_is_gsharedvt_klass (cfg, klass)) {
3859 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3860 MonoInst *res, *is_ref, *src_var, *addr;
3863 dreg = alloc_ireg (cfg);
3865 NEW_BBLOCK (cfg, is_ref_bb);
3866 NEW_BBLOCK (cfg, is_nullable_bb);
3867 NEW_BBLOCK (cfg, end_bb);
3868 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3869 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3870 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough (vtype) path: allocate and copy the value into the payload. */
3876 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3879 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3880 ins->opcode = OP_STOREV_MEMBASE;
3882 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3883 res->type = STACK_OBJ;
3885 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3888 MONO_START_BB (cfg, is_ref_bb);
3889 addr_reg = alloc_ireg (cfg);
3891 /* val is a vtype, so has to load the value manually */
3892 src_var = get_vreg_to_inst (cfg, val->dreg);
3894 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3895 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3896 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3897 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3900 MONO_START_BB (cfg, is_nullable_bb);
3903 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3904 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3906 MonoMethodSignature *box_sig;
3909 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3910 * construct that method at JIT time, so have to do things by hand.
3912 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3913 box_sig->ret = &mono_defaults.object_class->byval_arg;
3914 box_sig->param_count = 1;
3915 box_sig->params [0] = &klass->byval_arg;
3916 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3917 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3918 res->type = STACK_OBJ;
3922 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3924 MONO_START_BB (cfg, end_bb);
3926 *out_cbb = cfg->cbb;
/* Non-gsharedvt plain value type: allocate, then store after the header. */
3930 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3934 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 * Return whether @klass has at least one generic type argument that is
 * (co/contra)variant AND a reference type; the slow cast-with-cache helpers
 * are required for such classes since variance affects cast results.
 * Inflated generic classes are examined via their container's parameter
 * info; open generic containers are only considered when @context_used is
 * set (shared code).
 * NOTE(review): elided lines hide the declarations of i/type and the early
 * return for the non-generic case.
 */
3944 MonoGenericContainer *container;
3945 MonoGenericInst *ginst;
3947 if (klass->generic_class) {
3948 container = klass->generic_class->container_class->generic_container;
3949 ginst = klass->generic_class->context.class_inst;
3950 } else if (klass->generic_container && context_used) {
3951 container = klass->generic_container;
3952 ginst = container->context.class_inst;
3957 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters: variance is what makes the fast path unsound. */
3959 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3961 type = ginst->type_argv [i];
3962 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 * True when @klass needs the out-of-line cast helpers (interfaces, arrays,
 * nullables, MarshalByRef, sealed classes, or generic type variables).
 * The leading TRUE || currently forces this for EVERY class — i.e. the fast
 * inline isinst/castclass paths are disabled, per the FIXME below.
 */
3968 // FIXME: This doesn't work yet (class libs tests fail?)
3969 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 * Emit IR for the CIL castclass opcode on @src. For "complex" classes (which
 * is currently all of them — see is_complex_isinst) this calls the
 * castclass-with-cache managed wrapper, passing the cached cast result slot
 * obtained via MONO_RGCTX_INFO_CAST_CACHE. Otherwise it emits inline checks:
 * null objects pass through, interfaces go through mini_emit_iface_cast,
 * sealed classes compare the klass pointer directly, and the general case
 * defers to mini_emit_castclass_inst. InvalidCastException is raised on
 * failure.
 * NOTE(review): elided lines hide the args[] declaration and some braces.
 */
3975 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3977 MonoBasicBlock *is_null_bb;
3978 int obj_reg = src->dreg;
3979 int vtable_reg = alloc_preg (cfg);
3980 MonoInst *klass_inst = NULL;
3985 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3986 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3987 MonoInst *cache_ins;
3989 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3994 /* klass - it's the second element of the cache entry*/
3995 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3998 args [2] = cache_ins;
4000 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
4003 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4006 NEW_BBLOCK (cfg, is_null_bb);
/* castclass on null succeeds without any type check. */
4008 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4009 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4011 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4013 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4014 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4015 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4017 int klass_reg = alloc_preg (cfg);
4019 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4021 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4022 /* the remoting code is broken, access the class for now */
4023 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4024 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4026 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4027 cfg->exception_ptr = klass;
4030 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: a single pointer compare of the klass suffices. */
4032 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4033 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4035 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4037 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4038 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4042 MONO_START_BB (cfg, is_null_bb);
4044 reset_cast_details (cfg);
/*
 * handle_isinst:
 * Emit IR for the CIL isinst opcode on @src: the result register gets the
 * object itself on a successful type test and NULL otherwise. For "complex"
 * classes (currently all — see is_complex_isinst) this calls the
 * isinst-with-cache managed wrapper with the RGCTX-provided cache slot.
 * The inline fallback emits specialized sequences for interfaces, arrays
 * (rank compare + element-class test, with enum/object special cases and a
 * vector/bounds check for SZARRAY), nullables, sealed classes (pointer
 * compare), and the general class hierarchy walk.
 * NOTE(review): elided lines hide the args[]/ins declarations and several
 * braces; the array sub-cases are reconstructed from the visible branches.
 */
4053 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4056 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4057 int obj_reg = src->dreg;
4058 int vtable_reg = alloc_preg (cfg);
4059 int res_reg = alloc_ireg_ref (cfg);
4060 MonoInst *klass_inst = NULL;
4065 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4066 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4067 MonoInst *cache_ins;
4069 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4074 /* klass - it's the second element of the cache entry*/
4075 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4078 args [2] = cache_ins;
4080 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4083 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4086 NEW_BBLOCK (cfg, is_null_bb);
4087 NEW_BBLOCK (cfg, false_bb);
4088 NEW_BBLOCK (cfg, end_bb);
4090 /* Do the assignment at the beginning, so the other assignment can be if converted */
4091 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4092 ins->type = STACK_OBJ;
/* isinst on null yields null (the res_reg copy above already holds it). */
4095 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4096 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4098 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4100 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4101 g_assert (!context_used);
4102 /* the is_null_bb target simply copies the input register to the output */
4103 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4105 int klass_reg = alloc_preg (cfg);
4108 int rank_reg = alloc_preg (cfg);
4109 int eclass_reg = alloc_preg (cfg);
4111 g_assert (!context_used);
/* Array case: ranks must match before the element classes are compared. */
4112 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4113 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4114 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4116 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
4117 if (klass->cast_class == mono_defaults.object_class) {
4118 int parent_reg = alloc_preg (cfg);
4119 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4120 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4121 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4123 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4124 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4125 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4126 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4127 } else if (klass->cast_class == mono_defaults.enum_class) {
4128 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4129 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4130 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4131 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4133 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4134 /* Check that the object is a vector too */
4135 int bounds_reg = alloc_preg (cfg);
4136 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4137 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4138 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4141 /* the is_null_bb target simply copies the input register to the output */
4142 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4144 } else if (mono_class_is_nullable (klass)) {
4145 g_assert (!context_used);
4146 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4147 /* the is_null_bb target simply copies the input register to the output */
4148 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4150 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4151 g_assert (!context_used);
4152 /* the remoting code is broken, access the class for now */
4153 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4154 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4156 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4157 cfg->exception_ptr = klass;
4160 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: a single pointer compare decides the result. */
4162 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4163 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4165 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4166 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4168 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4169 /* the is_null_bb target simply copies the input register to the output */
4170 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4175 MONO_START_BB (cfg, false_bb);
/* Failed test: the result is NULL. */
4177 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4178 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4180 MONO_START_BB (cfg, is_null_bb);
4182 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 * Emit IR for the remoting-aware isinst variant (see the 0/1/2 result
 * contract in the comment below). With remoting enabled, transparent
 * proxies whose custom_type_info is unset cause result 2 ("cannot
 * determine"); otherwise the normal interface/class tests decide between
 * 0 and 1. Without DISABLE_REMOTING undefined... i.e. when remoting is
 * compiled out, reaching the proxy path is a hard error (g_error).
 * Returns an OP_ICONST-typed instruction holding the result in dreg.
 * NOTE(review): elided lines hide the ins declaration and several braces.
 */
4190 /* This opcode takes as input an object reference and a class, and returns:
4191 0) if the object is an instance of the class,
4192 1) if the object is not instance of the class,
4193 2) if the object is a proxy whose type cannot be determined */
4196 #ifndef DISABLE_REMOTING
4197 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4199 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4201 int obj_reg = src->dreg;
4202 int dreg = alloc_ireg (cfg);
4204 #ifndef DISABLE_REMOTING
4205 int klass_reg = alloc_preg (cfg);
4208 NEW_BBLOCK (cfg, true_bb);
4209 NEW_BBLOCK (cfg, false_bb);
4210 NEW_BBLOCK (cfg, end_bb);
4211 #ifndef DISABLE_REMOTING
4212 NEW_BBLOCK (cfg, false2_bb);
4213 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance: result 1. */
4216 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4217 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4219 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4220 #ifndef DISABLE_REMOTING
4221 NEW_BBLOCK (cfg, interface_fail_bb);
4224 tmp_reg = alloc_preg (cfg);
4225 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4226 #ifndef DISABLE_REMOTING
4227 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4228 MONO_START_BB (cfg, interface_fail_bb);
4229 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface test failed: only a transparent proxy can still say "unknown". */
4231 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4233 tmp_reg = alloc_preg (cfg);
4234 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4235 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4236 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4238 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4241 #ifndef DISABLE_REMOTING
4242 tmp_reg = alloc_preg (cfg);
4243 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4244 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4246 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy path: test against the remote class's proxy_class instead. */
4247 tmp_reg = alloc_preg (cfg);
4248 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4249 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4251 tmp_reg = alloc_preg (cfg);
4252 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4253 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4254 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4256 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4257 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4259 MONO_START_BB (cfg, no_proxy_bb);
4261 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4263 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4267 MONO_START_BB (cfg, false_bb);
4269 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4270 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4272 #ifndef DISABLE_REMOTING
4273 MONO_START_BB (cfg, false2_bb);
4275 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4276 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4279 MONO_START_BB (cfg, true_bb);
4281 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4283 MONO_START_BB (cfg, end_bb);
/* Materialize the merged result as an I4 value on the eval stack. */
4286 MONO_INST_NEW (cfg, ins, OP_ICONST);
4288 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 * Emit IR for the remoting-aware castclass variant (result contract 0/1 in
 * the comment below; failure throws InvalidCastException). Mirrors
 * handle_cisinst: transparent proxies with unset custom_type_info yield
 * result 1 ("type cannot be determined"), otherwise the interface/class
 * checks either succeed (result 0) or throw. With remoting compiled out,
 * reaching the proxy path is a hard error (g_error).
 * Returns an OP_ICONST-typed instruction holding the result in dreg.
 * NOTE(review): elided lines hide the ins declaration and several braces.
 */
4296 /* This opcode takes as input an object reference and a class, and returns:
4297 0) if the object is an instance of the class,
4298 1) if the object is a proxy whose type cannot be determined
4299 an InvalidCastException exception is thrown otherwhise*/
4302 #ifndef DISABLE_REMOTING
4303 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4305 MonoBasicBlock *ok_result_bb;
4307 int obj_reg = src->dreg;
4308 int dreg = alloc_ireg (cfg);
4309 int tmp_reg = alloc_preg (cfg);
4311 #ifndef DISABLE_REMOTING
4312 int klass_reg = alloc_preg (cfg);
4313 NEW_BBLOCK (cfg, end_bb);
4316 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting null always succeeds. */
4318 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4319 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4321 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4323 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4324 #ifndef DISABLE_REMOTING
4325 NEW_BBLOCK (cfg, interface_fail_bb);
4327 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4328 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4329 MONO_START_BB (cfg, interface_fail_bb);
4330 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy objects that failed the interface test throw here. */
4332 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4334 tmp_reg = alloc_preg (cfg);
4335 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4336 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4337 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: report "cannot determine" (result 1). */
4339 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4340 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4342 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4343 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4344 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4347 #ifndef DISABLE_REMOTING
4348 NEW_BBLOCK (cfg, no_proxy_bb);
4350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4351 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4352 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4354 tmp_reg = alloc_preg (cfg);
4355 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4358 tmp_reg = alloc_preg (cfg);
4359 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4360 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4363 NEW_BBLOCK (cfg, fail_1_bb);
4365 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4367 MONO_START_BB (cfg, fail_1_bb);
4369 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4370 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4372 MONO_START_BB (cfg, no_proxy_bb);
4374 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4376 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4380 MONO_START_BB (cfg, ok_result_bb);
4382 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4384 #ifndef DISABLE_REMOTING
4385 MONO_START_BB (cfg, end_bb);
/* Materialize the merged result as an I4 value on the eval stack. */
4389 MONO_INST_NEW (cfg, ins, OP_ICONST);
4391 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 * Inline the work of mono_delegate_ctor: allocate the delegate object
 * (@klass), store the target (skipping a provably-NULL target), the
 * MonoMethod, a per-domain compiled-code slot (method_code, cached in
 * domain_jit_info()->method_code_hash), and the invoke_impl trampoline.
 * Write barriers are emitted for the reference stores when
 * cfg->gen_write_barriers is set. The delegate trampoline performs the
 * remaining runtime checks mono_delegate_ctor would have done.
 * NOTE(review): elided lines hide dreg/ptr/domain/code_slot declarations,
 * the context_used handling, and some braces.
 */
4400 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4404 gpointer *trampoline;
4405 MonoInst *obj, *method_ins, *tramp_ins;
4409 obj = handle_alloc (cfg, klass, FALSE, 0);
4413 /* Inline the contents of mono_delegate_ctor */
4415 /* Set target field */
4416 /* Optimize away setting of NULL target */
4417 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4418 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4419 if (cfg->gen_write_barriers) {
4420 dreg = alloc_preg (cfg);
4421 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4422 emit_write_barrier (cfg, ptr, target);
4426 /* Set method field */
4427 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4428 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4429 if (cfg->gen_write_barriers) {
4430 dreg = alloc_preg (cfg);
4431 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4432 emit_write_barrier (cfg, ptr, method_ins);
4435 * To avoid looking up the compiled code belonging to the target method
4436 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4437 * store it, and we fill it after the method has been compiled.
4439 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4440 MonoInst *code_slot_ins;
4443 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
4445 domain = mono_domain_get ();
/* The code-slot hash is lazily created and shared per domain; lock around it. */
4446 mono_domain_lock (domain);
4447 if (!domain_jit_info (domain)->method_code_hash)
4448 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4449 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4451 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4452 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4454 mono_domain_unlock (domain);
4456 if (cfg->compile_aot)
4457 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4459 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4461 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4464 /* Set invoke_impl field */
4465 if (cfg->compile_aot) {
4466 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
4468 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
4469 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4471 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4473 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a native call to the vararg mono_array_new_va icall for a newarr/
 * newobj of a multi-dimensional array of the given @rank, with the
 * dimension arguments in @sp. Registers the icall (so it gets a wrapper),
 * marks the cfg as containing varargs, and disables LLVM since the vararg
 * calling convention is not supported there.
 */
4481 MonoJitICallInfo *info;
4483 /* Need to register the icall so it gets an icall wrapper */
4484 info = mono_get_array_new_va_icall (rank);
4486 cfg->flags |= MONO_CFG_HAS_VARARGS;
4488 /* mono_array_new_va () needs a vararg calling convention */
4489 cfg->disable_llvm = TRUE;
4491 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4492 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * If the compile unit uses a GOT variable and it has not yet been
 * materialized, emit OP_LOAD_GOTADDR into the very start of the entry
 * basic block and a dummy use in the exit block so the variable stays
 * live for the whole method (backends may generate the only real uses
 * late). No-op when there is no got_var or it was already allocated.
 */
4498 MonoInst *getaddr, *dummy_use;
4500 if (!cfg->got_var || cfg->got_var_allocated)
4503 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4504 getaddr->cil_code = cfg->header->code;
4505 getaddr->dreg = cfg->got_var->dreg;
4507 /* Add it to the start of the first bblock */
4508 if (cfg->bb_entry->code) {
4509 getaddr->next = cfg->bb_entry->code;
4510 cfg->bb_entry->code = getaddr;
4513 MONO_ADD_INS (cfg->bb_entry, getaddr);
4515 cfg->got_var_allocated = TRUE;
4518 * Add a dummy use to keep the got_var alive, since real uses might
4519 * only be generated by the back ends.
4520 * Add it to end_bblock, so the variable's lifetime covers the whole
4522 * It would be better to make the usage of the got var explicit in all
4523 * cases when the backend needs it (i.e. calls, throw etc.), so this
4524 * wouldn't be needed.
4526 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4527 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached IL-size limit for inlining; read from MONO_INLINELIMIT once. */
4530 static int inline_limit;
4531 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in CFG.
 * Rejects generic sharing, deep inline chains, NoInlining/Synchronized methods,
 * MarshalByRef classes, over-limit IL bodies and methods whose class still
 * needs its .cctor to run (unless it can be run right away).  Returns TRUE when
 * inlining is allowed.  NOTE(review): several lines (returns, braces, some
 * conditions) are missing from this extraction.
 */
4534 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4536 	MonoMethodHeaderSummary header;
4538 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4539 	MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining under generic sharing or when the inline chain is too deep. */
4543 	if (cfg->generic_sharing_context)
4546 	if (cfg->inline_depth > 10)
4549 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* Archs with LMF ops can "inline" simple icall/pinvoke methods too. */
4550 	if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4551 	     (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4552 	    !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4557 	if (!mono_method_get_header_summary (method, &header))
4560 	/*runtime, icall and pinvoke are checked by summary call*/
4561 	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4562 	    (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4563 	    (mono_class_is_marshalbyref (method->klass)) ||
4567 	/* also consider num_locals? */
4568 	/* Do the size check early to avoid creating vtables */
4569 	if (!inline_limit_inited) {
4570 		if (g_getenv ("MONO_INLINELIMIT"))
4571 			inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4573 			inline_limit = INLINE_LENGTH_LIMIT;
4574 		inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
4576 	if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4580 	 * if we can initialize the class of the method right away, we do,
4581 	 * otherwise we don't allow inlining if the class needs initialization,
4582 	 * since it would mean inserting a call to mono_runtime_class_init()
4583 	 * inside the inlined code
4585 	if (!(cfg->opt & MONO_OPT_SHARED)) {
4586 		/* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4587 		if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4588 			vtable = mono_class_vtable (cfg->domain, method->klass);
4591 			if (cfg->compile_aot && mono_class_needs_cctor_run (method->klass, NULL))
4593 			mono_runtime_class_init (vtable);
4594 		} else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4595 			if (cfg->run_cctors && method->klass->has_cctor) {
4596 				/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4597 				if (!method->klass->runtime_info)
4598 					/* No vtable created yet */
4600 				vtable = mono_class_vtable (cfg->domain, method->klass);
4603 				/* This makes it so that inlining cannot trigger .cctors: */
4604 				/* too many apps depend on them running with a */
4605 				/* specific order... */
4606 				if (! vtable->initialized)
4608 				mono_runtime_class_init (vtable);
4610 		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4611 			if (!method->klass->runtime_info)
4612 				/* No vtable created yet */
4614 			vtable = mono_class_vtable (cfg->domain, method->klass);
4617 			if (!vtable->initialized)
4622 		 * If we're compiling for shared code
4623 		 * the cctor will need to be run at aot method load time, for example,
4624 		 * or at the end of the compilation of the inlining method.
4626 		if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4631 	 * CAS - do not inline methods with declarative security
4632 	 * Note: this has to be before any possible return TRUE;
4634 	if (mono_security_method_has_declsec (method))
4637 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets cannot inline methods taking or returning R4. */
4638 	if (mono_arch_is_soft_float ()) {
4640 		if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4642 		for (i = 0; i < sig->param_count; ++i)
4643 			if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access to KLASS from METHOD requires an
 * explicit class-initialization (cctor) trigger to be emitted.  JIT mode can
 * skip it once the vtable is initialized; BeforeFieldInit classes accessed
 * from their own compiled method are also exempt.
 */
4652 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4654 	if (!cfg->compile_aot) {
/* At JIT time an already-initialized vtable means no trigger is needed. */
4656 		if (vtable->initialized)
4660 	if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4661 		if (cfg->method == method)
4665 	if (!mono_class_needs_cctor_run (klass, method))
4668 	if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4669 		/* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS.  Emits a bounds check when BCHECK is
 * TRUE.  On x86/amd64 a single LEA is used for power-of-two element sizes;
 * gsharedvt element sizes are fetched at runtime through the rgctx.
 * Returns the address instruction (type STACK_MP).
 */
4676 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4680 	int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* Variable-size (gsharedvt) element: size is only known at runtime. */
4683 	if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4686 	mono_class_init (klass);
4687 	size = mono_class_array_element_size (klass);
4690 	mult_reg = alloc_preg (cfg);
4691 	array_reg = arr->dreg;
4692 	index_reg = index->dreg;
4694 #if SIZEOF_REGISTER == 8
4695 	/* The array reg is 64 bits but the index reg is only 32 */
4696 	if (COMPILE_LLVM (cfg)) {
4698 		index2_reg = index_reg;
4700 		index2_reg = alloc_preg (cfg);
4701 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to 32 bits. */
4704 	if (index->type == STACK_I8) {
4705 		index2_reg = alloc_preg (cfg);
4706 		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4708 		index2_reg = index_reg;
4713 		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4715 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: element sizes 1/2/4/8 fold into one LEA with a shift. */
4716 	if (size == 1 || size == 2 || size == 4 || size == 8) {
4717 		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4719 		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4720 		ins->klass = mono_class_get_element_class (klass);
4721 		ins->type = STACK_MP;
4727 	add_reg = alloc_ireg_mp (cfg);
4730 		MonoInst *rgctx_ins;
/* gsharedvt: multiply by the element size loaded from the rgctx at runtime. */
4733 		g_assert (cfg->generic_sharing_context);
4734 		context_used = mini_class_check_context_used (cfg, klass);
4735 		g_assert (context_used);
4736 		rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4737 		MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4739 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4741 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4742 	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4743 	ins->klass = mono_class_get_element_class (klass);
4744 	ins->type = STACK_MP;
4745 	MONO_ADD_INS (cfg->cbb, ins);
4750 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a rank-2 (two-dimensional)
 * array: subtract each dimension's lower bound, range-check both adjusted
 * indexes against the per-dimension lengths (throwing
 * IndexOutOfRangeException), then compute
 *   addr = arr + (realidx1 * len2 + realidx2) * elem_size + vector-offset.
 * Only compiled on archs with native mul/div (depends on OP_LMUL/OP_PMUL).
 */
4752 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4754 	int bounds_reg = alloc_preg (cfg);
4755 	int add_reg = alloc_ireg_mp (cfg);
4756 	int mult_reg = alloc_preg (cfg);
4757 	int mult2_reg = alloc_preg (cfg);
4758 	int low1_reg = alloc_preg (cfg);
4759 	int low2_reg = alloc_preg (cfg);
4760 	int high1_reg = alloc_preg (cfg);
4761 	int high2_reg = alloc_preg (cfg);
4762 	int realidx1_reg = alloc_preg (cfg);
4763 	int realidx2_reg = alloc_preg (cfg);
4764 	int sum_reg = alloc_preg (cfg);
4765 	int index1, index2, tmpreg;
4769 	mono_class_init (klass);
4770 	size = mono_class_array_element_size (klass);
4772 	index1 = index_ins1->dreg;
4773 	index2 = index_ins2->dreg;
4775 #if SIZEOF_REGISTER == 8
4776 	/* The array reg is 64 bits but the index reg is only 32 */
4777 	if (COMPILE_LLVM (cfg)) {
4780 		tmpreg = alloc_preg (cfg);
4781 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4783 		tmpreg = alloc_preg (cfg);
4784 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4788 	// FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4792 	/* range checking */
4793 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4794 				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound1; must be < length1. */
4796 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4797 				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4798 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4799 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4800 				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4801 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4802 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check using the second MonoArrayBounds entry. */
4804 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4805 				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4806 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4807 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4808 				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4809 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4810 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major address: ((realidx1 * len2) + realidx2) * size + header. */
4812 	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4813 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4814 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4815 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4816 	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4818 	ins->type = STACK_MP;
4820 	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Front-end for emitting a multi-dimensional array element-address
 * computation for CMETHOD's Address/Get/Set accessor.  Rank-1 and (with
 * intrinsics enabled and native mul/div) rank-2 arrays get inline address
 * code; everything else calls the marshalled Array.Address wrapper.
 */
4827 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4831 	MonoMethod *addr_method;
/* A setter signature carries the value as its last parameter; drop it. */
4834 	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4837 		return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4839 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4840 	/* emit_ldelema_2 depends on OP_LMUL */
4841 	if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4842 		return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Array.Address helper for this rank/size. */
4846 	element_size = mono_class_array_element_size (cmethod->klass->element_class);
4847 	addr_method = mono_marshal_get_array_address (rank, element_size);
4848 	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor break IL instructions / Debugger.Break (). */
4853 static MonoBreakPolicy
4854 always_insert_breakpoint (MonoMethod *method)
4856 	return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4859 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4862 * mono_set_break_policy:
4863 * policy_callback: the new callback function
4865  * Allow embedders to decide whether to actually obey breakpoint instructions
4866 * (both break IL instructions and Debugger.Break () method calls), for example
4867 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4868 * untrusted or semi-trusted code.
4870 * @policy_callback will be called every time a break point instruction needs to
4871 * be inserted with the method argument being the method that calls Debugger.Break()
4872 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4873 * if it wants the breakpoint to not be effective in the given method.
4874 * #MONO_BREAK_POLICY_ALWAYS is the default.
4877 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
4879 	if (policy_callback)
4880 		break_policy_func = policy_callback;
4882 		break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic -- the misspelled name is referenced by
 * callers, so it cannot be renamed here)
 *
 *   Consult the embedder-installed break policy for METHOD and return whether
 * a breakpoint should actually be emitted.  MONO_BREAK_POLICY_ON_DBG is no
 * longer supported and only warns.
 */
4886 should_insert_brekpoint (MonoMethod *method) {
4887 	switch (break_policy_func (method)) {
4888 	case MONO_BREAK_POLICY_ALWAYS:
4890 	case MONO_BREAK_POLICY_NEVER:
4892 	case MONO_BREAK_POLICY_ON_DBG:
4893 		g_warning ("mdb no longer supported");
4896 		g_warning ("Incorrect value returned from break policy callback");
4901 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the body of Array.GetGenericValueImpl (IS_SET == 0) or
 * SetGenericValueImpl (IS_SET != 0): compute the element address and copy the
 * value between the array slot and the by-ref argument, with a write barrier
 * when storing a reference into the array.
 */
4903 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4905 	MonoInst *addr, *store, *load;
4906 	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4908 	/* the bounds check is already done by the callers */
4909 	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: load from the by-ref value argument and store into the array slot. */
4911 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4912 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4913 		if (mini_type_is_reference (cfg, fsig->params [2]))
4914 			emit_write_barrier (cfg, addr, load);
/* Get: load from the array slot and store through the by-ref argument. */
4916 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4917 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is a reference type (considering generic sharing). */
4924 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4926 	return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for a stelem-style array store (sp [0] = array, sp [1] = index,
 * sp [2] = value).  Reference-type stores (other than storing a literal null)
 * go through the virtual stelemref helper, which performs the array covariance
 * type check; value-type stores compute the address inline (with a constant
 * offset when the index is an OP_ICONST) and add a write barrier when needed.
 * SAFETY_CHECKS enables the bounds check for the inline paths.
 */
4930 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4932 	if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4933 		!(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4934 		MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4935 		MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4936 		MonoInst *iargs [3];
4939 			mono_class_setup_vtable (obj_array);
4940 		g_assert (helper->slot);
4942 		if (sp [0]->type != STACK_OBJ)
4944 		if (sp [2]->type != STACK_OBJ)
4951 		return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt element: use a variable-size store through the computed address. */
4955 		if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4958 			// FIXME-VT: OP_ICONST optimization
4959 			addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4960 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4961 			ins->opcode = OP_STOREV_MEMBASE;
4962 		} else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset into the store itself. */
4963 			int array_reg = sp [0]->dreg;
4964 			int index_reg = sp [1]->dreg;
4965 			int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4968 				MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4969 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4971 			MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4972 			EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4973 			if (generic_class_is_reference_type (cfg, klass))
4974 				emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore/UnsafeLoad: an element store or load with NO
 * bounds or type-safety checks.  The element class comes from the value
 * parameter for stores and from the return type for loads.
 */
4981 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4986 		eklass = mono_class_from_mono_type (fsig->params [2]);
4988 		eklass = mono_class_from_mono_type (fsig->ret);
4992 		return emit_array_store (cfg, eklass, args, FALSE);
4994 		MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4995 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Give the SIMD and native-types intrinsics a chance to replace a
 * constructor call with inline IR; returns NULL-ish fallthrough semantics
 * handled by the caller (missing lines in this extraction).
 */
5001 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5003 #ifdef MONO_ARCH_SIMD_INTRINSICS
5004 	MonoInst *ins = NULL;
5006 	if (cfg->opt & MONO_OPT_SIMD) {
5007 		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5013 	return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given KIND into the current bblock. */
5017 emit_memory_barrier (MonoCompile *cfg, int kind)
5019 	MonoInst *ins = NULL;
5020 	MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5021 	MONO_ADD_INS (cfg->cbb, ins);
5022 	ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics recognized only when compiling with the LLVM backend:
 * Math.Sin/Cos/Sqrt/Abs(double) map to single FP opcodes, and Math.Min/Max
 * on I4/U4/I8/U8 map to cmov-style min/max opcodes when MONO_OPT_CMOV is on.
 * Returns the emitted instruction, or falls through (lines missing in this
 * extraction) when nothing matches.
 */
5028 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5030 	MonoInst *ins = NULL;
5033 	/* The LLVM backend supports these intrinsics */
5034 	if (cmethod->klass == mono_defaults.math_class) {
5035 		if (strcmp (cmethod->name, "Sin") == 0) {
5037 		} else if (strcmp (cmethod->name, "Cos") == 0) {
5039 		} else if (strcmp (cmethod->name, "Sqrt") == 0) {
5041 		} else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary FP op: one source register, R8 result. */
5046 			MONO_INST_NEW (cfg, ins, opcode);
5047 			ins->type = STACK_R8;
5048 			ins->dreg = mono_alloc_freg (cfg);
5049 			ins->sreg1 = args [0]->dreg;
5050 			MONO_ADD_INS (cfg->cbb, ins);
5054 		if (cfg->opt & MONO_OPT_CMOV) {
5055 			if (strcmp (cmethod->name, "Min") == 0) {
5056 				if (fsig->params [0]->type == MONO_TYPE_I4)
5058 				if (fsig->params [0]->type == MONO_TYPE_U4)
5059 					opcode = OP_IMIN_UN;
5060 				else if (fsig->params [0]->type == MONO_TYPE_I8)
5062 				else if (fsig->params [0]->type == MONO_TYPE_U8)
5063 					opcode = OP_LMIN_UN;
5064 			} else if (strcmp (cmethod->name, "Max") == 0) {
5065 				if (fsig->params [0]->type == MONO_TYPE_I4)
5067 				if (fsig->params [0]->type == MONO_TYPE_U4)
5068 					opcode = OP_IMAX_UN;
5069 				else if (fsig->params [0]->type == MONO_TYPE_I8)
5071 				else if (fsig->params [0]->type == MONO_TYPE_U8)
5072 					opcode = OP_LMAX_UN;
/* Binary min/max: stack type follows the operand width. */
5077 			MONO_INST_NEW (cfg, ins, opcode);
5078 			ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5079 			ins->dreg = mono_alloc_ireg (cfg);
5080 			ins->sreg1 = args [0]->dreg;
5081 			ins->sreg2 = args [1]->dreg;
5082 			MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe even under generic sharing: the unchecked
 * Array.UnsafeStore / Array.UnsafeLoad helpers.
 */
5090 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5092 	if (cmethod->klass == mono_defaults.array_class) {
5093 		if (strcmp (cmethod->name, "UnsafeStore") == 0)
5094 			return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5095 		if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5096 			return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *
 *   Main intrinsics dispatcher: if CMETHOD is one of the runtime/corlib
 * methods the JIT knows how to open-code (String.get_Chars/get_Length,
 * Object.GetType, Array length/rank accessors, RuntimeHelpers,
 * Thread/Interlocked atomics, Monitor fast paths, Math, ObjC selectors, SIMD,
 * ...), emit inline IR and return it; otherwise defer to the LLVM and
 * arch-specific hooks.  NOTE(review): many lines (returns, braces, else arms)
 * are missing from this extraction, so control flow is only partially visible.
 */
5103 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5105 	MonoInst *ins = NULL;
/* Lazily cache the System.Runtime.CompilerServices.RuntimeHelpers class. */
5107 	static MonoClass *runtime_helpers_class = NULL;
5108 	if (! runtime_helpers_class)
5109 		runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5110 			"System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
5112 	if (cmethod->klass == mono_defaults.string_class) {
5113 		if (strcmp (cmethod->name, "get_Chars") == 0) {
5114 			int dreg = alloc_ireg (cfg);
5115 			int index_reg = alloc_preg (cfg);
5116 			int mult_reg = alloc_preg (cfg);
5117 			int add_reg = alloc_preg (cfg);
5119 #if SIZEOF_REGISTER == 8
5120 			/* The array reg is 64 bits but the index reg is only 32 */
5121 			MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5123 			index_reg = args [1]->dreg;
5125 			MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5127 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* LEA folds index*2 + chars-offset into one instruction on x86/amd64. */
5128 			EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5129 			add_reg = ins->dreg;
5130 			/* Avoid a warning */
5132 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, 
5135 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5136 			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5137 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg, 
5138 								   add_reg, G_STRUCT_OFFSET (MonoString, chars));
5140 			type_from_op (ins, NULL, NULL);
5142 		} else if (strcmp (cmethod->name, "get_Length") == 0) {
5143 			int dreg = alloc_ireg (cfg);
5144 			/* Decompose later to allow more optimizations */
5145 			EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5146 			ins->type = STACK_I4;
5147 			ins->flags |= MONO_INST_FAULT;
5148 			cfg->cbb->has_array_access = TRUE;
5149 			cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5152 		} else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5153 			int mult_reg = alloc_preg (cfg);
5154 			int add_reg = alloc_preg (cfg);
5156 			/* The corlib functions check for oob already. */
5157 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5158 			MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5159 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5160 			return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
5163 	} else if (cmethod->klass == mono_defaults.object_class) {
5165 		if (strcmp (cmethod->name, "GetType") == 0) {
5166 			int dreg = alloc_ireg_ref (cfg);
5167 			int vt_reg = alloc_preg (cfg);
5168 			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5169 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5170 			type_from_op (ins, NULL, NULL);
5173 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Non-moving GC only: the hash is derived from the object address. */
5174 		} else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5175 			int dreg = alloc_ireg (cfg);
5176 			int t1 = alloc_ireg (cfg);
5178 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5179 			EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5180 			ins->type = STACK_I4;
5184 		} else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor is empty; a NOP suffices. */
5185 			MONO_INST_NEW (cfg, ins, OP_NOP);
5186 			MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
5190 	} else if (cmethod->klass == mono_defaults.array_class) {
5191 		if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5192 			return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5194 #ifndef MONO_BIG_ARRAYS
5196 		 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5199 		if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5200 			int dreg = alloc_ireg (cfg);
5201 			int bounds_reg = alloc_ireg_mp (cfg);
5202 			MonoBasicBlock *end_bb, *szarray_bb;
5203 			gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5205 			NEW_BBLOCK (cfg, end_bb);
5206 			NEW_BBLOCK (cfg, szarray_bb);
/* NULL bounds means a szarray; branch to the simple path. */
5208 			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5209 										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5210 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5211 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5212 			/* Non-szarray case */
5214 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5215 									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5217 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5218 									   bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5219 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5220 			MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length; lower bound is always 0. */
5223 				EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5224 									   args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5226 				MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5227 			MONO_START_BB (cfg, end_bb);
5229 			EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5230 			ins->type = STACK_I4;
5236 		if (cmethod->name [0] != 'g')
5239 		if (strcmp (cmethod->name, "get_Rank") == 0) {
5240 			int dreg = alloc_ireg (cfg);
5241 			int vtable_reg = alloc_preg (cfg);
5242 			MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg, 
5243 												 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5244 			EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5245 								   vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5246 			type_from_op (ins, NULL, NULL);
5249 		} else if (strcmp (cmethod->name, "get_Length") == 0) {
5250 			int dreg = alloc_ireg (cfg);
5252 			EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5253 										 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5254 			type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
5259 	} else if (cmethod->klass == runtime_helpers_class) {
5261 		if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5262 			EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
5266 	} else if (cmethod->klass == mono_defaults.thread_class) {
5267 		if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5268 			MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5269 			MONO_ADD_INS (cfg->cbb, ins);
5271 		} else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5272 			return emit_memory_barrier (cfg, FullBarrier);
/* ---- System.Threading.Monitor fast paths ---- */
5274 	} else if (cmethod->klass == mono_defaults.monitor_class) {
5276 	/* FIXME this should be integrated to the check below once we support the trampoline version */
5277 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5278 		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5279 			MonoMethod *fast_method = NULL;
5281 			/* Avoid infinite recursion */
5282 			if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5285 			fast_method = mono_monitor_get_fast_path (cmethod);
5289 			return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5293 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5294 		if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5297 			if (COMPILE_LLVM (cfg)) {
5299 				 * Pass the argument normally, the LLVM backend will handle the
5300 				 * calling convention problems.
5302 				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5304 				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5305 					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5306 				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5307 											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5310 			return (MonoInst*)call;
5311 		} else if (strcmp (cmethod->name, "Exit") == 0) {
5314 			if (COMPILE_LLVM (cfg)) {
5315 				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5317 				call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5318 					    NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5319 				mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5320 											   MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5323 			return (MonoInst*)call;
5325 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5327 			MonoMethod *fast_method = NULL;
5329 			/* Avoid infinite recursion */
5330 			if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5331 					(strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5332 					 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5335 			if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5336 					strcmp (cmethod->name, "Exit") == 0)
5337 				fast_method = mono_monitor_get_fast_path (cmethod);
5341 			return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked atomics ---- */
5344 	} else if (cmethod->klass->image == mono_defaults.corlib &&
5345 			   (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5346 			   (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5349 #if SIZEOF_REGISTER == 8
5350 		if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5351 			/* 64 bit reads are already atomic */
5352 			MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5353 			ins->dreg = mono_alloc_preg (cfg);
5354 			ins->inst_basereg = args [0]->dreg;
5355 			ins->inst_offset = 0;
5356 			MONO_ADD_INS (cfg->cbb, ins);
5360 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add all lower to OP_ATOMIC_ADD_NEW_* with the right
 * addend (1, -1, or args [1]). */
5361 		if (strcmp (cmethod->name, "Increment") == 0) {
5362 			MonoInst *ins_iconst;
5365 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5366 				opcode = OP_ATOMIC_ADD_NEW_I4;
5367 				cfg->has_atomic_add_new_i4 = TRUE;
5369 #if SIZEOF_REGISTER == 8
5370 			else if (fsig->params [0]->type == MONO_TYPE_I8)
5371 				opcode = OP_ATOMIC_ADD_NEW_I8;
5374 				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5375 				ins_iconst->inst_c0 = 1;
5376 				ins_iconst->dreg = mono_alloc_ireg (cfg);
5377 				MONO_ADD_INS (cfg->cbb, ins_iconst);
5379 				MONO_INST_NEW (cfg, ins, opcode);
5380 				ins->dreg = mono_alloc_ireg (cfg);
5381 				ins->inst_basereg = args [0]->dreg;
5382 				ins->inst_offset = 0;
5383 				ins->sreg2 = ins_iconst->dreg;
5384 				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5385 				MONO_ADD_INS (cfg->cbb, ins);
5387 		} else if (strcmp (cmethod->name, "Decrement") == 0) {
5388 			MonoInst *ins_iconst;
5391 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5392 				opcode = OP_ATOMIC_ADD_NEW_I4;
5393 				cfg->has_atomic_add_new_i4 = TRUE;
5395 #if SIZEOF_REGISTER == 8
5396 			else if (fsig->params [0]->type == MONO_TYPE_I8)
5397 				opcode = OP_ATOMIC_ADD_NEW_I8;
5400 				MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5401 				ins_iconst->inst_c0 = -1;
5402 				ins_iconst->dreg = mono_alloc_ireg (cfg);
5403 				MONO_ADD_INS (cfg->cbb, ins_iconst);
5405 				MONO_INST_NEW (cfg, ins, opcode);
5406 				ins->dreg = mono_alloc_ireg (cfg);
5407 				ins->inst_basereg = args [0]->dreg;
5408 				ins->inst_offset = 0;
5409 				ins->sreg2 = ins_iconst->dreg;
5410 				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5411 				MONO_ADD_INS (cfg->cbb, ins);
5413 		} else if (strcmp (cmethod->name, "Add") == 0) {
5416 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5417 				opcode = OP_ATOMIC_ADD_NEW_I4;
5418 				cfg->has_atomic_add_new_i4 = TRUE;
5420 #if SIZEOF_REGISTER == 8
5421 			else if (fsig->params [0]->type == MONO_TYPE_I8)
5422 				opcode = OP_ATOMIC_ADD_NEW_I8;
5426 				MONO_INST_NEW (cfg, ins, opcode);
5427 				ins->dreg = mono_alloc_ireg (cfg);
5428 				ins->inst_basereg = args [0]->dreg;
5429 				ins->inst_offset = 0;
5430 				ins->sreg2 = args [1]->dreg;
5431 				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5432 				MONO_ADD_INS (cfg->cbb, ins);
5435 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5437 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5438 		if (strcmp (cmethod->name, "Exchange") == 0) {
5440 			gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5442 			if (fsig->params [0]->type == MONO_TYPE_I4) {
5443 				opcode = OP_ATOMIC_EXCHANGE_I4;
5444 				cfg->has_atomic_exchange_i4 = TRUE;
5446 #if SIZEOF_REGISTER == 8
5447 			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5448 					(fsig->params [0]->type == MONO_TYPE_I))
5449 				opcode = OP_ATOMIC_EXCHANGE_I8;
5451 			else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5452 				opcode = OP_ATOMIC_EXCHANGE_I4;
5453 				cfg->has_atomic_exchange_i4 = TRUE;
5459 			MONO_INST_NEW (cfg, ins, opcode);
5460 			ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5461 			ins->inst_basereg = args [0]->dreg;
5462 			ins->inst_offset = 0;
5463 			ins->sreg2 = args [1]->dreg;
5464 			MONO_ADD_INS (cfg->cbb, ins);
5466 			switch (fsig->params [0]->type) {
5468 				ins->type = STACK_I4;
5472 				ins->type = STACK_I8;
5474 			case MONO_TYPE_OBJECT:
5475 				ins->type = STACK_OBJ;
5478 				g_assert_not_reached ();
/* Storing a reference into the location needs a GC write barrier. */
5481 			if (cfg->gen_write_barriers && is_ref)
5482 				emit_write_barrier (cfg, args [0], args [1]);
5484 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5486 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5487 		if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5489 			gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5490 			if (fsig->params [1]->type == MONO_TYPE_I4)
5492 			else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5493 				size = sizeof (gpointer);
5494 			else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5497 				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5498 				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5499 				ins->sreg1 = args [0]->dreg;
5500 				ins->sreg2 = args [1]->dreg;
5501 				ins->sreg3 = args [2]->dreg;
5502 				ins->type = STACK_I4;
5503 				MONO_ADD_INS (cfg->cbb, ins);
5504 			} else if (size == 8) {
5505 				MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5506 				ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5507 				ins->sreg1 = args [0]->dreg;
5508 				ins->sreg2 = args [1]->dreg;
5509 				ins->sreg3 = args [2]->dreg;
5510 				ins->type = STACK_I8;
5511 				MONO_ADD_INS (cfg->cbb, ins);
5513 				/* g_assert_not_reached (); */
5515 			if (cfg->gen_write_barriers && is_ref)
5516 				emit_write_barrier (cfg, args [0], args [1]);
5518 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5520 		if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5521 			ins = emit_memory_barrier (cfg, FullBarrier);
/* ---- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows ---- */
5525 	} else if (cmethod->klass->image == mono_defaults.corlib) {
5526 		if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5527 				&& strcmp (cmethod->klass->name, "Debugger") == 0) {
5528 			if (should_insert_brekpoint (cfg->method)) {
5529 				ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5531 				MONO_INST_NEW (cfg, ins, OP_NOP);
5532 				MONO_ADD_INS (cfg->cbb, ins);
5536 		if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5537 				&& strcmp (cmethod->klass->name, "Environment") == 0) {
/* Compile-time constant: resolved per target platform. */
5539 			EMIT_NEW_ICONST (cfg, ins, 1);
5541 			EMIT_NEW_ICONST (cfg, ins, 0);
5545 	} else if (cmethod->klass == mono_defaults.math_class) {
5547 		 * There is general branches code for Min/Max, but it does not work for 
5549 		 * http://everything2.com/?node_id=1051618
/* ---- MonoMac/monotouch Selector.GetHandle: fold to OP_OBJC_GET_SELECTOR
 * when AOT-compiling and the argument is a constant string. ---- */
5551 	} else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5552 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5554 		MonoJumpInfoToken *ji;
5557 		cfg->disable_llvm = TRUE;
5559 		if (args [0]->opcode == OP_GOT_ENTRY) {
5560 			pi = args [0]->inst_p1;
5561 			g_assert (pi->opcode == OP_PATCH_INFO);
5562 			g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5565 			g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5566 			ji = args [0]->inst_p0;
5569 		NULLIFY_INS (args [0]);
5572 		s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5573 		MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5574 		ins->dreg = mono_alloc_ireg (cfg);
5576 		ins->inst_p0 = mono_string_to_utf8 (s);
5577 		MONO_ADD_INS (cfg->cbb, ins);
/* ---- Fallbacks: SIMD, native types, LLVM hooks, arch-specific hook ---- */
5582 #ifdef MONO_ARCH_SIMD_INTRINSICS
5583 	if (cfg->opt & MONO_OPT_SIMD) {
5584 		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5590 	ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5594 	if (COMPILE_LLVM (cfg)) {
5595 		ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5600 	return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5604 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect calls to selected runtime methods to faster inline equivalents.
 *   Visible here: String.InternalAllocateStr is rewritten into a direct call
 *   to the managed GC allocator (skipped when allocation profiling or shared
 *   code is active, since those need the generic path).
 *   NOTE(review): intermediate source lines are missing from this chunk;
 *   comments describe only the visible code.
 */
5607 inline static MonoInst*
5608 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5609 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5611 if (method->klass == mono_defaults.string_class) {
5612 /* managed string allocation support */
5613 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5614 MonoInst *iargs [2];
5615 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5616 MonoMethod *managed_alloc = NULL;
5618 g_assert (vtable); /* Should not fail since it is System.String */
5619 #ifndef MONO_CROSS_COMPILE
5620 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* call the managed allocator with (vtable, length) */
5624 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5625 iargs [1] = args [0];
5626 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Spill the inline arguments on the evaluation stack (SP) into fresh
 *   OP_LOCAL vars stored in cfg->args, so the inlined body can address them
 *   like ordinary arguments. For the implicit 'this' (i == 0 with hasthis)
 *   the type is derived from the stack slot instead of the signature.
 */
5633 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5635 MonoInst *store, *temp;
5638 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5639 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5642 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5643 * would be different than the MonoInst's used to represent arguments, and
5644 * the ldelema implementation can't deal with that.
5645 * Solution: When ldelema is used on an inline argument, create a var for
5646 * it, emit ldelema on that var, and emit the saving code below in
5647 * inline_method () if needed.
5649 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5650 cfg->args [i] = temp;
5651 /* This uses cfg->args [i] which is set by the preceding line */
5652 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5653 store->cil_code = sp [0]->cil_code;
5658 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5659 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5661 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug helper: only allow inlining of callees whose full name starts with
 *   the prefix given in MONO_INLINE_CALLED_METHOD_NAME_LIMIT. The env var is
 *   read once and cached in a function-local static.
 */
5663 check_inline_called_method_name_limit (MonoMethod *called_method)
5666 static const char *limit = NULL;
5668 if (limit == NULL) {
5669 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5671 if (limit_string != NULL)
5672 limit = limit_string;
/* empty limit means "no restriction" (handled outside the visible lines) */
5677 if (limit [0] != '\0') {
5678 char *called_method_name = mono_method_full_name (called_method, TRUE);
5680 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5681 g_free (called_method_name);
5683 //return (strncmp_result <= 0);
5684 return (strncmp_result == 0);
5691 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debug helper mirroring check_inline_called_method_name_limit, but keyed
 *   on the CALLER's name via MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
5693 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5696 static const char *limit = NULL;
5698 if (limit == NULL) {
5699 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5700 if (limit_string != NULL) {
5701 limit = limit_string;
/* empty limit means "no restriction" (handled outside the visible lines) */
5707 if (limit [0] != '\0') {
5708 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5710 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5711 g_free (caller_method_name);
5713 //return (strncmp_result <= 0);
5714 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes DREG to the zero value of RTYPE: NULL for
 *   reference/pointer types, integer 0 for I1..U4 and I8/U8, 0.0 for R4/R8
 *   (via a shared static r8_0 constant), and VZERO for value types,
 *   typedbyref and value-type generic instances / type variables.
 */
5722 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5724 static double r8_0 = 0.0;
5728 rtype = mini_replace_type (rtype);
5732 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5733 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5734 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5735 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5736 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5737 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5738 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5739 ins->type = STACK_R8;
5740 ins->inst_p0 = (void*)&r8_0;
5742 MONO_ADD_INS (cfg->cbb, ins);
5743 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5744 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5745 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5746 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5747 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* everything else is treated as a reference: init to NULL */
5749 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_init_local:
 *   Zero-initialize local variable LOCAL of type TYPE. Under soft-float a
 *   temporary register is initialized first and then stored to the local;
 *   otherwise the local's dreg is initialized directly via emit_init_rvar.
 */
5754 emit_init_local (MonoCompile *cfg, int local, MonoType *type)
5756 MonoInst *var = cfg->locals [local];
5757 if (COMPILE_SOFT_FLOAT (cfg)) {
5759 int reg = alloc_dreg (cfg, var->type);
5760 emit_init_rvar (cfg, reg, type);
5761 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5763 emit_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *   Try to inline CMETHOD into the current compilation at IP. Saves the
 *   per-method state of CFG, recursively runs mono_method_to_ir () on the
 *   callee between fresh start/end bblocks, restores the state, and either
 *   commits the inlined blocks (cost < 60, or inline_always) or discards
 *   them. Returns via paths not fully visible in this chunk.
 *   NOTE(review): intermediate source lines are missing from this chunk;
 *   comments describe only the visible code.
 */
5768 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5769 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5771 MonoInst *ins, *rvar = NULL;
5772 MonoMethodHeader *cheader;
5773 MonoBasicBlock *ebblock, *sbblock;
5775 MonoMethod *prev_inlined_method;
5776 MonoInst **prev_locals, **prev_args;
5777 MonoType **prev_arg_types;
5778 guint prev_real_offset;
5779 GHashTable *prev_cbb_hash;
5780 MonoBasicBlock **prev_cil_offset_to_bb;
5781 MonoBasicBlock *prev_cbb;
5782 unsigned char* prev_cil_start;
5783 guint32 prev_cil_offset_to_bb_len;
5784 MonoMethod *prev_current_method;
5785 MonoGenericContext *prev_generic_context;
5786 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5788 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional env-var based filters on which methods may be inlined */
5790 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5791 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5794 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5795 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5799 if (cfg->verbose_level > 2)
5800 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5802 if (!cmethod->inline_info) {
5803 cfg->stat_inlineable_methods++;
5804 cmethod->inline_info = 1;
5807 /* allocate local variables */
5808 cheader = mono_method_get_header (cmethod);
5810 if (cheader == NULL || mono_loader_get_last_error ()) {
5811 MonoLoaderError *error = mono_loader_get_last_error ();
5814 mono_metadata_free_mh (cheader);
5815 if (inline_always && error)
5816 mono_cfg_set_exception (cfg, error->exception_type);
5818 mono_loader_clear_error ();
5822 /* Must verify before creating locals as it can cause the JIT to assert. */
5823 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5824 mono_metadata_free_mh (cheader);
5828 /* allocate space to store the return value */
5829 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5830 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
5833 prev_locals = cfg->locals;
5834 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5835 for (i = 0; i < cheader->num_locals; ++i)
5836 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5838 /* allocate start and end blocks */
5839 /* This is needed so if the inline is aborted, we can clean up */
5840 NEW_BBLOCK (cfg, sbblock);
5841 sbblock->real_offset = real_offset;
5843 NEW_BBLOCK (cfg, ebblock);
5844 ebblock->block_num = cfg->num_bblocks++;
5845 ebblock->real_offset = real_offset;
/* save the per-method translation state so it can be restored after the
 * recursive mono_method_to_ir () call below */
5847 prev_args = cfg->args;
5848 prev_arg_types = cfg->arg_types;
5849 prev_inlined_method = cfg->inlined_method;
5850 cfg->inlined_method = cmethod;
5851 cfg->ret_var_set = FALSE;
5852 cfg->inline_depth ++;
5853 prev_real_offset = cfg->real_offset;
5854 prev_cbb_hash = cfg->cbb_hash;
5855 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5856 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5857 prev_cil_start = cfg->cil_start;
5858 prev_cbb = cfg->cbb;
5859 prev_current_method = cfg->current_method;
5860 prev_generic_context = cfg->generic_context;
5861 prev_ret_var_set = cfg->ret_var_set;
5863 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5866 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5868 ret_var_set = cfg->ret_var_set;
/* restore the saved state regardless of whether the inline succeeded */
5870 cfg->inlined_method = prev_inlined_method;
5871 cfg->real_offset = prev_real_offset;
5872 cfg->cbb_hash = prev_cbb_hash;
5873 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5874 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5875 cfg->cil_start = prev_cil_start;
5876 cfg->locals = prev_locals;
5877 cfg->args = prev_args;
5878 cfg->arg_types = prev_arg_types;
5879 cfg->current_method = prev_current_method;
5880 cfg->generic_context = prev_generic_context;
5881 cfg->ret_var_set = prev_ret_var_set;
5882 cfg->inline_depth --;
5884 if ((costs >= 0 && costs < 60) || inline_always) {
5885 if (cfg->verbose_level > 2)
5886 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5888 cfg->stat_inlined_methods++;
5890 /* always add some code to avoid block split failures */
5891 MONO_INST_NEW (cfg, ins, OP_NOP);
5892 MONO_ADD_INS (prev_cbb, ins);
5894 prev_cbb->next_bb = sbblock;
5895 link_bblock (cfg, prev_cbb, sbblock);
5898 * Get rid of the begin and end bblocks if possible to aid local
5901 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5903 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5904 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5906 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5907 MonoBasicBlock *prev = ebblock->in_bb [0];
5908 mono_merge_basic_blocks (cfg, prev, ebblock);
5910 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5911 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5912 cfg->cbb = prev_cbb;
5916 * It's possible that the rvar is set in some prev bblock, but not in others.
5922 for (i = 0; i < ebblock->in_count; ++i) {
5923 bb = ebblock->in_bb [i];
5925 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5928 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5938 * If the inlined method contains only a throw, then the ret var is not
5939 * set, so set it to a dummy value.
5942 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5944 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5947 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* inline failed or was too expensive: report and discard the new bblocks */
5950 if (cfg->verbose_level > 2)
5951 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5952 cfg->exception_type = MONO_EXCEPTION_NONE;
5953 mono_loader_clear_error ();
5955 /* This gets rid of the newly added bblocks */
5956 cfg->cbb = prev_cbb;
5958 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5963 * Some of these comments may well be out-of-date.
5964 * Design decisions: we do a single pass over the IL code (and we do bblock
5965 * splitting/merging in the few cases when it's required: a back jump to an IL
5966 * address that was not already seen as bblock starting point).
5967 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5968 * Complex operations are decomposed in simpler ones right away. We need to let the
5969 * arch-specific code peek and poke inside this process somehow (except when the
5970 * optimizations can take advantage of the full semantic info of coarse opcodes).
5971 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5972 * MonoInst->opcode initially is the IL opcode or some simplification of that
5973 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5974 * opcode with value bigger than OP_LAST.
5975 * At this point the IR can be handed over to an interpreter, a dumb code generator
5976 * or to the optimizing code generator that will translate it to SSA form.
5978 * Profiling directed optimizations.
5979 * We may compile by default with few or no optimizations and instrument the code
5980 * or the user may indicate what methods to optimize the most either in a config file
5981 * or through repeated runs where the compiler applies offline the optimizations to
5982 * each method and then decides if it was worth it.
5985 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5986 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5987 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5988 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5989 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5990 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5991 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5992 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5994 /* offset from br.s -> br like opcodes */
5995 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE if the CIL address IP belongs to basic block BB, i.e. the
 *   cil_offset_to_bb table maps it either to BB or to no block at all.
 */
5998 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6000 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6002 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Scan the CIL stream from START to END and create a basic block (via
 *   GET_BBLOCK) at every branch target and at the instruction following
 *   each branch/switch. Blocks whose only entry is a throw are marked
 *   out_of_line. The switch dispatches on the operand kind of each opcode.
 *   NOTE(review): intermediate source lines are missing from this chunk;
 *   comments describe only the visible code.
 */
6006 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6008 unsigned char *ip = start;
6009 unsigned char *target;
6012 MonoBasicBlock *bblock;
6013 const MonoOpcode *opcode;
6016 cli_addr = ip - start;
6017 i = mono_opcode_value ((const guint8 **)&ip, end);
6020 opcode = &mono_opcodes [i];
6021 switch (opcode->argument) {
6022 case MonoInlineNone:
6025 case MonoInlineString:
6026 case MonoInlineType:
6027 case MonoInlineField:
6028 case MonoInlineMethod:
6031 case MonoShortInlineR:
6038 case MonoShortInlineVar:
6039 case MonoShortInlineI:
6042 case MonoShortInlineBrTarget:
/* 1-byte signed branch offset, relative to the next instruction */
6043 target = start + cli_addr + 2 + (signed char)ip [1];
6044 GET_BBLOCK (cfg, bblock, target);
6047 GET_BBLOCK (cfg, bblock, ip);
6049 case MonoInlineBrTarget:
/* 4-byte signed branch offset, relative to the next instruction */
6050 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6051 GET_BBLOCK (cfg, bblock, target);
6054 GET_BBLOCK (cfg, bblock, ip);
6056 case MonoInlineSwitch: {
6057 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole switch instruction */
6060 cli_addr += 5 + 4 * n;
6061 target = start + cli_addr;
6062 GET_BBLOCK (cfg, bblock, target);
6064 for (j = 0; j < n; ++j) {
6065 target = start + cli_addr + (gint32)read32 (ip);
6066 GET_BBLOCK (cfg, bblock, target);
6076 g_assert_not_reached ();
6079 if (i == CEE_THROW) {
6080 unsigned char *bb_start = ip - 1;
6082 /* Find the start of the bblock containing the throw */
6084 while ((bb_start >= start) && !bblock) {
6085 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6089 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod. For wrapper methods the method is fetched
 *   from the wrapper data (and inflated with CONTEXT when generic); for
 *   normal methods it is looked up in the declaring image. Open constructed
 *   types are allowed (contrast with mini_get_method below).
 */
6099 static inline MonoMethod *
6100 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6104 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6105 method = mono_method_get_wrapper_data (m, token);
6107 method = mono_class_inflate_generic_method (method, context);
6109 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, a method on an open constructed type is rejected (the visible
 *   condition guards that case; the rejection itself is outside this chunk).
 */
6115 static inline MonoMethod *
6116 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6118 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6120 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass, using wrapper data (plus generic inflation)
 *   for wrapper methods and a normal image lookup otherwise; the class is
 *   initialized before being returned.
 */
6126 static inline MonoClass*
6127 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6131 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6132 klass = mono_method_get_wrapper_data (method, token);
6134 klass = mono_class_inflate_generic_class (klass, context);
6136 klass = mono_class_get_full (method->klass->image, token, context);
6139 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature. Wrapper methods carry the
 *   signature in their wrapper data (inflated with CONTEXT when needed,
 *   asserting on inflation errors); otherwise the signature is parsed from
 *   the method's image metadata.
 */
6143 static inline MonoMethodSignature*
6144 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6146 MonoMethodSignature *fsig;
6148 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6151 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6153 fsig = mono_inflate_generic_signature (fsig, context, &error);
6155 g_assert (mono_error_ok (&error));
6158 fsig = mono_metadata_parse_signature (method->klass->image, token);
6164 * Returns TRUE if the JIT should abort inlining because "callee"
6165 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands for CALLER -> CALLEE. On an ECMA link demand
 *   failure, code throwing a SecurityException is emitted before the call;
 *   other failures set a MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg
 *   (without clobbering a previously recorded exception).
 */
6168 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6172 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6176 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6177 if (result == MONO_JIT_SECURITY_OK)
6180 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6181 /* Generate code to throw a SecurityException before the actual call/link */
6182 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6185 NEW_ICONST (cfg, args [0], 4);
6186 NEW_METHODCONST (cfg, args [1], caller);
6187 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6188 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6189 /* don't hide previous results */
6190 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6191 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return SecurityManager.ThrowException(1 arg), lazily resolved and cached
 *   in a function-local static on first use.
 */
6199 throw_exception (void)
6201 static MonoMethod *method = NULL;
6204 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6205 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (ex) so the generated code
 *   raises EX at runtime.
 */
6212 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6214 MonoMethod *thrower = throw_exception ();
6217 EMIT_NEW_PCONST (cfg, args [0], ex);
6218 mono_emit_method_call (cfg, thrower, args, NULL);
6222 * Return the original method if a wrapper is specified. We can only access
6223 * the custom attributes from the original method.
6226 get_original_method (MonoMethod *method)
6228 if (method->wrapper_type == MONO_WRAPPER_NONE)
6231 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6232 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6235 /* in other cases we need to find the original method */
6236 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 *   may not access FIELD, emit code that throws the returned exception.
 */
6240 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6241 MonoBasicBlock *bblock, unsigned char *ip)
6243 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6244 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6246 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 *   may not call CALLEE, emit code that throws the returned exception.
 */
6250 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6251 MonoBasicBlock *bblock, unsigned char *ip)
6253 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6254 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6256 emit_throw_exception (cfg, ex);
6260 * Check that the IL instructions at ip are the array initialization
6261 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Recognize the dup/ldtoken/call RuntimeHelpers.InitializeArray idiom
 *   following a newarr and, when matched, return the raw field data
 *   (RVA-mapped for regular images, GUINT_TO_POINTER (rva) for AOT) so the
 *   JIT can emit a direct memory copy instead of the runtime call.
 *   NOTE(review): intermediate source lines are missing from this chunk;
 *   comments describe only the visible code.
 */
6264 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6267 * newarr[System.Int32]
6269 * ldtoken field valuetype ...
6270 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6272 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6273 guint32 token = read32 (ip + 7);
6274 guint32 field_token = read32 (ip + 2);
6275 guint32 field_index = field_token & 0xffffff;
6277 const char *data_ptr;
6279 MonoMethod *cmethod;
6280 MonoClass *dummy_class;
6281 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6287 *out_field_token = field_token;
6289 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only the real corlib RuntimeHelpers.InitializeArray qualifies */
6292 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6294 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6295 case MONO_TYPE_BOOLEAN:
6299 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6300 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6301 case MONO_TYPE_CHAR:
6318 if (size > mono_type_size (field->type, &dummy_align))
6321 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6322 if (!method->klass->image->dynamic) {
6323 field_index = read32 (ip + 2) & 0xffffff;
6324 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6325 data_ptr = mono_image_rva_map (method->klass->image, rva);
6326 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6327 /* for aot code we do the lookup on load */
6328 if (aot && data_ptr)
6329 return GUINT_TO_POINTER (rva);
6331 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6333 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record a MONO_EXCEPTION_INVALID_PROGRAM on the cfg with a message that
 *   names the method and disassembles the offending instruction at IP
 *   (or notes an empty body). The header is queued for later freeing.
 */
6341 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6343 char *method_fname = mono_method_full_name (method, TRUE);
6345 MonoMethodHeader *header = mono_method_get_header (method);
6347 if (header->code_size == 0)
6348 method_code = g_strdup ("method body is empty.");
6350 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6351 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6352 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6353 g_free (method_fname);
6354 g_free (method_code);
6355 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built exception object on the cfg; exception_ptr is
 *   registered as a GC root so the object stays alive until reported.
 */
6359 set_exception_object (MonoCompile *cfg, MonoException *exception)
6361 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6362 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6363 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store of the top-of-stack value into local N. When the store
 *   would be a plain reg-reg move of a constant that is also the last
 *   instruction emitted, retarget that instruction's dreg instead of
 *   emitting a separate move.
 */
6367 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6370 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6371 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6372 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6373 /* Optimize reg-reg moves away */
6375 * Can't optimize other opcodes, since sp[0] might point to
6376 * the last ins of a decomposed opcode.
6378 sp [0]->dreg = (cfg)->locals [n]->dreg;
6380 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6385 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for "ldloca N; initobj T": when the next instruction in the
 *   same bblock is initobj, initialize the local directly (emit_init_local)
 *   instead of taking its address. SIZE selects the short/long ldloca
 *   operand encoding (local read via read16 for the long form shown here).
 */
6388 static inline unsigned char *
6389 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6399 local = read16 (ip + 2);
6403 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6404 /* From the INITOBJ case */
6405 token = read32 (ip + 2);
6406 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6407 CHECK_TYPELOAD (klass);
6408 type = mini_replace_type (&klass->byval_arg);
6409 emit_init_local (cfg, local, type);
/*
 * is_exception_class:
 *   Walk the parent chain of CLASS and return whether it derives from
 *   (or is) System.Exception.
 */
6417 is_exception_class (MonoClass *class)
6420 if (class == mono_defaults.exception_class)
6422 class = class->parent;
6428 * is_jit_optimizer_disabled:
6430 * Determine whenever M's assembly has a DebuggableAttribute with the
6431 * IsJITOptimizerDisabled flag set.
/*
 * The result is cached per-assembly (jit_optimizer_disabled +
 * jit_optimizer_disabled_inited, published with a memory barrier).
 * NOTE(review): intermediate source lines are missing from this chunk;
 * comments describe only the visible code.
 */
6434 is_jit_optimizer_disabled (MonoMethod *m)
6436 MonoAssembly *ass = m->klass->image->assembly;
6437 MonoCustomAttrInfo* attrs;
6438 static MonoClass *klass;
6440 gboolean val = FALSE;
6443 if (ass->jit_optimizer_disabled_inited)
6444 return ass->jit_optimizer_disabled;
6447 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* attribute class missing: cache FALSE and return */
6450 ass->jit_optimizer_disabled = FALSE;
6451 mono_memory_barrier ();
6452 ass->jit_optimizer_disabled_inited = TRUE;
6456 attrs = mono_custom_attrs_from_assembly (ass);
6458 for (i = 0; i < attrs->num_attrs; ++i) {
6459 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6462 MonoMethodSignature *sig;
6464 if (!attr->ctor || attr->ctor->klass != klass)
6466 /* Decode the attribute. See reflection.c */
6467 len = attr->data_size;
6468 p = (const char*)attr->data;
6469 g_assert (read16 (p) == 0x0001);
6472 // FIXME: Support named parameters
6473 sig = mono_method_signature (attr->ctor);
6474 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6476 /* Two boolean arguments */
6480 mono_custom_attrs_free (attrs);
6483 ass->jit_optimizer_disabled = val;
6484 mono_memory_barrier ();
6485 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 *   emitted: start from the arch's opinion (or signature equality), then
 *   rule out byref/pointer/fnptr arguments and value-type 'this' (which may
 *   point into the caller's stack frame), pinvokes, LMF-saving callers,
 *   most wrappers, and any opcode other than plain CEE_CALL.
 */
6491 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6493 gboolean supported_tail_call;
6496 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6497 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
6499 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6502 for (i = 0; i < fsig->param_count; ++i) {
6503 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6504 /* These can point to the current method's stack */
6505 supported_tail_call = FALSE;
6507 if (fsig->hasthis && cmethod->klass->valuetype)
6508 /* this might point to the current method's stack */
6509 supported_tail_call = FALSE;
6510 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6511 supported_tail_call = FALSE;
6512 if (cfg->method->save_lmf)
6513 supported_tail_call = FALSE;
6514 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6515 supported_tail_call = FALSE;
6516 if (call_opcode != CEE_CALL)
6517 supported_tail_call = FALSE;
6519 /* Debugging support */
6521 if (supported_tail_call) {
6522 if (!mono_debug_count ())
6523 supported_tail_call = FALSE;
6527 return supported_tail_call;
6530 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6531 * it to the thread local value based on the tls_offset field. Every other kind of access to
6532 * the field causes an assert.
/*
 * is_magic_tls_access:
 *   Return TRUE only for corlib's ThreadLocal`1.tlsdata field (name,
 *   declaring-type name and image are all checked).
 */
6535 is_magic_tls_access (MonoClassField *field)
6537 if (strcmp (field->name, "tlsdata"))
6539 if (strcmp (field->parent->name, "ThreadLocal`1"))
6541 return field->parent->image == mono_defaults.corlib;
6544 /* emits the code needed to access a managed tls var (like ThreadStatic)
6545 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6546 * pointer for the current thread.
6547 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *   Inline the thread-static lookup:
 *     idx  = (offset >> 24) - 1                 (bucket index)
 *     addr = thread->static_data [idx] + (offset & 0xffffff)
 *   The idx is scaled by pointer size (<< 3 on 64-bit, << 2 on 32-bit)
 *   before being added to the static_data array base.
 */
6550 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6553 int static_data_reg, array_reg, dreg;
6554 int offset2_reg, idx_reg;
6555 // inlined access to the tls data
6556 // idx = (offset >> 24) - 1;
6557 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6558 static_data_reg = alloc_ireg (cfg);
6559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6560 idx_reg = alloc_ireg (cfg);
6561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6564 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6565 array_reg = alloc_ireg (cfg);
6566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6567 offset2_reg = alloc_ireg (cfg);
6568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6569 dreg = alloc_ireg (cfg);
6570 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6575 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6576 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *   Build (and cache in *cached_tls_addr) the address of a ThreadLocal<T>
 *   tlsdata slot: load tls_offset from the ThreadLocal object, obtain the
 *   current MonoInternalThread (via intrinsic when available, otherwise a
 *   call to CurrentInternalThread_internal), then compute the address with
 *   emit_managed_static_data_access. Subsequent calls reuse the cached temp.
 */
6579 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6581 MonoInst *load, *addr, *temp, *store, *thread_ins;
6582 MonoClassField *offset_field;
6584 if (*cached_tls_addr) {
6585 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6588 thread_ins = mono_get_thread_intrinsic (cfg);
6589 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6591 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6593 MONO_ADD_INS (cfg->cbb, thread_ins);
/* no arch intrinsic: fall back to a managed call for the current thread */
6595 MonoMethod *thread_method;
6596 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6597 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6599 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6600 addr->klass = mono_class_from_mono_type (tls_field->type);
6601 addr->type = STACK_MP;
6602 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6603 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6605 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6610 * mono_method_to_ir:
6612 * Translate the .net IL into linear IR.
6615 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6616 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6617 guint inline_offset, gboolean is_virtual_call)
6620 MonoInst *ins, **sp, **stack_start;
6621 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6622 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6623 MonoMethod *cmethod, *method_definition;
6624 MonoInst **arg_array;
6625 MonoMethodHeader *header;
6627 guint32 token, ins_flag;
6629 MonoClass *constrained_call = NULL;
6630 unsigned char *ip, *end, *target, *err_pos;
6631 MonoMethodSignature *sig;
6632 MonoGenericContext *generic_context = NULL;
6633 MonoGenericContainer *generic_container = NULL;
6634 MonoType **param_types;
6635 int i, n, start_new_bblock, dreg;
6636 int num_calls = 0, inline_costs = 0;
6637 int breakpoint_id = 0;
6639 MonoBoolean security, pinvoke;
6640 MonoSecurityManager* secman = NULL;
6641 MonoDeclSecurityActions actions;
6642 GSList *class_inits = NULL;
6643 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6645 gboolean init_locals, seq_points, skip_dead_blocks;
6646 gboolean disable_inline, sym_seq_points = FALSE;
6647 MonoInst *cached_tls_addr = NULL;
6648 MonoDebugMethodInfo *minfo;
6649 MonoBitSet *seq_point_locs = NULL;
6650 MonoBitSet *seq_point_set_locs = NULL;
6652 disable_inline = is_jit_optimizer_disabled (method);
6654 /* serialization and xdomain stuff may need access to private fields and methods */
6655 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6656 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6657 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6658 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6659 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6660 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6662 dont_verify |= mono_security_smcs_hack_enabled ();
6664 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6665 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6666 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6667 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6668 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6670 image = method->klass->image;
6671 header = mono_method_get_header (method);
6673 MonoLoaderError *error;
6675 if ((error = mono_loader_get_last_error ())) {
6676 mono_cfg_set_exception (cfg, error->exception_type);
6678 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6679 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6681 goto exception_exit;
6683 generic_container = mono_method_get_generic_container (method);
6684 sig = mono_method_signature (method);
6685 num_args = sig->hasthis + sig->param_count;
6686 ip = (unsigned char*)header->code;
6687 cfg->cil_start = ip;
6688 end = ip + header->code_size;
6689 cfg->stat_cil_code_size += header->code_size;
6690 init_locals = header->init_locals;
6692 seq_points = cfg->gen_seq_points && cfg->method == method;
6693 #ifdef PLATFORM_ANDROID
6694 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6697 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6698 /* We could hit a seq point before attaching to the JIT (#8338) */
6702 if (cfg->gen_seq_points && cfg->method == method) {
6703 minfo = mono_debug_lookup_method (method);
6705 int i, n_il_offsets;
6709 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6710 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6711 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6712 sym_seq_points = TRUE;
6713 for (i = 0; i < n_il_offsets; ++i) {
6714 if (il_offsets [i] < header->code_size)
6715 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6717 g_free (il_offsets);
6718 g_free (line_numbers);
6723 * Methods without init_locals set could cause asserts in various passes
6728 method_definition = method;
6729 while (method_definition->is_inflated) {
6730 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6731 method_definition = imethod->declaring;
6734 /* SkipVerification is not allowed if core-clr is enabled */
6735 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6737 dont_verify_stloc = TRUE;
6740 if (sig->is_inflated)
6741 generic_context = mono_method_get_context (method);
6742 else if (generic_container)
6743 generic_context = &generic_container->context;
6744 cfg->generic_context = generic_context;
6746 if (!cfg->generic_sharing_context)
6747 g_assert (!sig->has_type_parameters);
6749 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6750 g_assert (method->is_inflated);
6751 g_assert (mono_method_get_context (method)->method_inst);
6753 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6754 g_assert (sig->generic_param_count);
6756 if (cfg->method == method) {
6757 cfg->real_offset = 0;
6759 cfg->real_offset = inline_offset;
6762 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6763 cfg->cil_offset_to_bb_len = header->code_size;
6765 cfg->current_method = method;
6767 if (cfg->verbose_level > 2)
6768 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6770 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6772 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6773 for (n = 0; n < sig->param_count; ++n)
6774 param_types [n + sig->hasthis] = sig->params [n];
6775 cfg->arg_types = param_types;
6777 dont_inline = g_list_prepend (dont_inline, method);
6778 if (cfg->method == method) {
6780 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6781 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6784 NEW_BBLOCK (cfg, start_bblock);
6785 cfg->bb_entry = start_bblock;
6786 start_bblock->cil_code = NULL;
6787 start_bblock->cil_length = 0;
6788 #if defined(__native_client_codegen__)
6789 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6790 ins->dreg = alloc_dreg (cfg, STACK_I4);
6791 MONO_ADD_INS (start_bblock, ins);
6795 NEW_BBLOCK (cfg, end_bblock);
6796 cfg->bb_exit = end_bblock;
6797 end_bblock->cil_code = NULL;
6798 end_bblock->cil_length = 0;
6799 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6800 g_assert (cfg->num_bblocks == 2);
6802 arg_array = cfg->args;
6804 if (header->num_clauses) {
6805 cfg->spvars = g_hash_table_new (NULL, NULL);
6806 cfg->exvars = g_hash_table_new (NULL, NULL);
6808 /* handle exception clauses */
6809 for (i = 0; i < header->num_clauses; ++i) {
6810 MonoBasicBlock *try_bb;
6811 MonoExceptionClause *clause = &header->clauses [i];
6812 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6813 try_bb->real_offset = clause->try_offset;
6814 try_bb->try_start = TRUE;
6815 try_bb->region = ((i + 1) << 8) | clause->flags;
6816 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6817 tblock->real_offset = clause->handler_offset;
6818 tblock->flags |= BB_EXCEPTION_HANDLER;
6821 * Linking the try block with the EH block hinders inlining as we won't be able to
6822 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6824 if (COMPILE_LLVM (cfg))
6825 link_bblock (cfg, try_bb, tblock);
6827 if (*(ip + clause->handler_offset) == CEE_POP)
6828 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6830 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6831 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6832 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6833 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6834 MONO_ADD_INS (tblock, ins);
6836 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6837 /* finally clauses already have a seq point */
6838 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6839 MONO_ADD_INS (tblock, ins);
6842 /* todo: is a fault block unsafe to optimize? */
6843 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6844 tblock->flags |= BB_EXCEPTION_UNSAFE;
6848 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6850 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6852 /* catch and filter blocks get the exception object on the stack */
6853 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6854 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6855 MonoInst *dummy_use;
6857 /* mostly like handle_stack_args (), but just sets the input args */
6858 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6859 tblock->in_scount = 1;
6860 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6861 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6864 * Add a dummy use for the exvar so its liveness info will be
6868 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6870 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6871 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6872 tblock->flags |= BB_EXCEPTION_HANDLER;
6873 tblock->real_offset = clause->data.filter_offset;
6874 tblock->in_scount = 1;
6875 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6876 /* The filter block shares the exvar with the handler block */
6877 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6878 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6879 MONO_ADD_INS (tblock, ins);
6883 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6884 clause->data.catch_class &&
6885 cfg->generic_sharing_context &&
6886 mono_class_check_context_used (clause->data.catch_class)) {
6888 * In shared generic code with catch
6889 * clauses containing type variables
6890 * the exception handling code has to
6891 * be able to get to the rgctx.
6892 * Therefore we have to make sure that
6893 * the vtable/mrgctx argument (for
6894 * static or generic methods) or the
6895 * "this" argument (for non-static
6896 * methods) are live.
6898 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6899 mini_method_get_context (method)->method_inst ||
6900 method->klass->valuetype) {
6901 mono_get_vtable_var (cfg);
6903 MonoInst *dummy_use;
6905 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6910 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6911 cfg->cbb = start_bblock;
6912 cfg->args = arg_array;
6913 mono_save_args (cfg, sig, inline_args);
6916 /* FIRST CODE BLOCK */
6917 NEW_BBLOCK (cfg, bblock);
6918 bblock->cil_code = ip;
6922 ADD_BBLOCK (cfg, bblock);
6924 if (cfg->method == method) {
6925 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6926 if (breakpoint_id) {
6927 MONO_INST_NEW (cfg, ins, OP_BREAK);
6928 MONO_ADD_INS (bblock, ins);
6932 if (mono_security_cas_enabled ())
6933 secman = mono_security_manager_get_methods ();
6935 security = (secman && mono_security_method_has_declsec (method));
6936 /* at this point having security doesn't mean we have any code to generate */
6937 if (security && (cfg->method == method)) {
6938 /* Only Demand, NonCasDemand and DemandChoice require code generation.
6939 * And we do not want to enter the next section (with allocation) if we
6940 * have nothing to generate */
6941 security = mono_declsec_get_demands (method, &actions);
6944 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6945 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6947 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6948 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6949 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6951 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6952 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6956 mono_custom_attrs_free (custom);
6959 custom = mono_custom_attrs_from_class (wrapped->klass);
6960 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6964 mono_custom_attrs_free (custom);
6967 /* not a P/Invoke after all */
6972 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6973 /* we use a separate basic block for the initialization code */
6974 NEW_BBLOCK (cfg, init_localsbb);
6975 cfg->bb_init = init_localsbb;
6976 init_localsbb->real_offset = cfg->real_offset;
6977 start_bblock->next_bb = init_localsbb;
6978 init_localsbb->next_bb = bblock;
6979 link_bblock (cfg, start_bblock, init_localsbb);
6980 link_bblock (cfg, init_localsbb, bblock);
6982 cfg->cbb = init_localsbb;
6984 start_bblock->next_bb = bblock;
6985 link_bblock (cfg, start_bblock, bblock);
6988 if (cfg->gsharedvt && cfg->method == method) {
6989 MonoGSharedVtMethodInfo *info;
6990 MonoInst *var, *locals_var;
6993 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
6994 info->method = cfg->method;
6995 info->count_entries = 16;
6996 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
6997 cfg->gsharedvt_info = info;
6999 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7000 /* prevent it from being register allocated */
7001 //var->flags |= MONO_INST_VOLATILE;
7002 cfg->gsharedvt_info_var = var;
7004 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7005 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7007 /* Allocate locals */
7008 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7009 /* prevent it from being register allocated */
7010 //locals_var->flags |= MONO_INST_VOLATILE;
7011 cfg->gsharedvt_locals_var = locals_var;
7013 dreg = alloc_ireg (cfg);
7014 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7016 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7017 ins->dreg = locals_var->dreg;
7019 MONO_ADD_INS (cfg->cbb, ins);
7020 cfg->gsharedvt_locals_var_ins = ins;
7022 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7025 ins->flags |= MONO_INST_INIT;
7029 /* at this point we know, if security is TRUE, that some code needs to be generated */
7030 if (security && (cfg->method == method)) {
7033 cfg->stat_cas_demand_generation++;
7035 if (actions.demand.blob) {
7036 /* Add code for SecurityAction.Demand */
7037 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7038 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7039 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7040 mono_emit_method_call (cfg, secman->demand, args, NULL);
7042 if (actions.noncasdemand.blob) {
7043 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7044 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7045 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7046 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7047 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7048 mono_emit_method_call (cfg, secman->demand, args, NULL);
7050 if (actions.demandchoice.blob) {
7051 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7052 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7053 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7054 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7055 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7059 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7061 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7064 if (mono_security_core_clr_enabled ()) {
7065 /* check if this is native code, e.g. an icall or a p/invoke */
7066 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7067 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7069 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7070 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7072 /* if this is a native call then it can only be JITted from platform code */
7073 if ((icall || pinvk) && method->klass && method->klass->image) {
7074 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7075 MonoException *ex = icall ? mono_get_exception_security () :
7076 mono_get_exception_method_access ();
7077 emit_throw_exception (cfg, ex);
7084 CHECK_CFG_EXCEPTION;
7086 if (header->code_size == 0)
7089 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7094 if (cfg->method == method)
7095 mono_debug_init_method (cfg, bblock, breakpoint_id);
7097 for (n = 0; n < header->num_locals; ++n) {
7098 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7103 /* We force the vtable variable here for all shared methods
7104 for the possibility that they might show up in a stack
7105 trace where their exact instantiation is needed. */
7106 if (cfg->generic_sharing_context && method == cfg->method) {
7107 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7108 mini_method_get_context (method)->method_inst ||
7109 method->klass->valuetype) {
7110 mono_get_vtable_var (cfg);
7112 /* FIXME: Is there a better way to do this?
7113 We need the variable live for the duration
7114 of the whole method. */
7115 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7119 /* add a check for this != NULL to inlined methods */
7120 if (is_virtual_call) {
7123 NEW_ARGLOAD (cfg, arg_ins, 0);
7124 MONO_ADD_INS (cfg->cbb, arg_ins);
7125 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7128 skip_dead_blocks = !dont_verify;
7129 if (skip_dead_blocks) {
7130 original_bb = bb = mono_basic_block_split (method, &error);
7131 if (!mono_error_ok (&error)) {
7132 mono_error_cleanup (&error);
7138 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7139 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7142 start_new_bblock = 0;
7145 if (cfg->method == method)
7146 cfg->real_offset = ip - header->code;
7148 cfg->real_offset = inline_offset;
7153 if (start_new_bblock) {
7154 bblock->cil_length = ip - bblock->cil_code;
7155 if (start_new_bblock == 2) {
7156 g_assert (ip == tblock->cil_code);
7158 GET_BBLOCK (cfg, tblock, ip);
7160 bblock->next_bb = tblock;
7163 start_new_bblock = 0;
7164 for (i = 0; i < bblock->in_scount; ++i) {
7165 if (cfg->verbose_level > 3)
7166 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7167 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7171 g_slist_free (class_inits);
7174 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7175 link_bblock (cfg, bblock, tblock);
7176 if (sp != stack_start) {
7177 handle_stack_args (cfg, stack_start, sp - stack_start);
7179 CHECK_UNVERIFIABLE (cfg);
7181 bblock->next_bb = tblock;
7184 for (i = 0; i < bblock->in_scount; ++i) {
7185 if (cfg->verbose_level > 3)
7186 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7187 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7190 g_slist_free (class_inits);
7195 if (skip_dead_blocks) {
7196 int ip_offset = ip - header->code;
7198 if (ip_offset == bb->end)
7202 int op_size = mono_opcode_size (ip, end);
7203 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7205 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7207 if (ip_offset + op_size == bb->end) {
7208 MONO_INST_NEW (cfg, ins, OP_NOP);
7209 MONO_ADD_INS (bblock, ins);
7210 start_new_bblock = 1;
7218 * Sequence points are points where the debugger can place a breakpoint.
7219 * Currently, we generate these automatically at points where the IL
7222 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7224 * Make methods interruptable at the beginning, and at the targets of
7225 * backward branches.
7226 * Also, do this at the start of every bblock in methods with clauses too,
7227 * to be able to handle instructions with imprecise control flow like
7229 * Backward branches are handled at the end of method-to-ir ().
7231 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7233 /* Avoid sequence points on empty IL like .volatile */
7234 // FIXME: Enable this
7235 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7236 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7237 if (sp != stack_start)
7238 ins->flags |= MONO_INST_NONEMPTY_STACK;
7239 MONO_ADD_INS (cfg->cbb, ins);
7242 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7245 bblock->real_offset = cfg->real_offset;
7247 if ((cfg->method == method) && cfg->coverage_info) {
7248 guint32 cil_offset = ip - header->code;
7249 cfg->coverage_info->data [cil_offset].cil_code = ip;
7251 /* TODO: Use an increment here */
7252 #if defined(TARGET_X86)
7253 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7254 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7256 MONO_ADD_INS (cfg->cbb, ins);
7258 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7259 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7263 if (cfg->verbose_level > 3)
7264 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7268 if (seq_points && !sym_seq_points && sp != stack_start) {
7270 * The C# compiler uses these nops to notify the JIT that it should
7271 * insert seq points.
7273 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7274 MONO_ADD_INS (cfg->cbb, ins);
7276 if (cfg->keep_cil_nops)
7277 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7279 MONO_INST_NEW (cfg, ins, OP_NOP);
7281 MONO_ADD_INS (bblock, ins);
7284 if (should_insert_brekpoint (cfg->method)) {
7285 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7287 MONO_INST_NEW (cfg, ins, OP_NOP);
7290 MONO_ADD_INS (bblock, ins);
7296 CHECK_STACK_OVF (1);
7297 n = (*ip)-CEE_LDARG_0;
7299 EMIT_NEW_ARGLOAD (cfg, ins, n);
7307 CHECK_STACK_OVF (1);
7308 n = (*ip)-CEE_LDLOC_0;
7310 EMIT_NEW_LOCLOAD (cfg, ins, n);
7319 n = (*ip)-CEE_STLOC_0;
7322 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7324 emit_stloc_ir (cfg, sp, header, n);
7331 CHECK_STACK_OVF (1);
7334 EMIT_NEW_ARGLOAD (cfg, ins, n);
7340 CHECK_STACK_OVF (1);
7343 NEW_ARGLOADA (cfg, ins, n);
7344 MONO_ADD_INS (cfg->cbb, ins);
7354 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7356 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7361 CHECK_STACK_OVF (1);
7364 EMIT_NEW_LOCLOAD (cfg, ins, n);
7368 case CEE_LDLOCA_S: {
7369 unsigned char *tmp_ip;
7371 CHECK_STACK_OVF (1);
7372 CHECK_LOCAL (ip [1]);
7374 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7380 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7389 CHECK_LOCAL (ip [1]);
7390 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7392 emit_stloc_ir (cfg, sp, header, ip [1]);
7397 CHECK_STACK_OVF (1);
7398 EMIT_NEW_PCONST (cfg, ins, NULL);
7399 ins->type = STACK_OBJ;
7404 CHECK_STACK_OVF (1);
7405 EMIT_NEW_ICONST (cfg, ins, -1);
7418 CHECK_STACK_OVF (1);
7419 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7425 CHECK_STACK_OVF (1);
7427 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7433 CHECK_STACK_OVF (1);
7434 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7440 CHECK_STACK_OVF (1);
7441 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7442 ins->type = STACK_I8;
7443 ins->dreg = alloc_dreg (cfg, STACK_I8);
7445 ins->inst_l = (gint64)read64 (ip);
7446 MONO_ADD_INS (bblock, ins);
7452 gboolean use_aotconst = FALSE;
7454 #ifdef TARGET_POWERPC
7455 /* FIXME: Clean this up */
7456 if (cfg->compile_aot)
7457 use_aotconst = TRUE;
7460 /* FIXME: we should really allocate this only late in the compilation process */
7461 f = mono_domain_alloc (cfg->domain, sizeof (float));
7463 CHECK_STACK_OVF (1);
7469 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7471 dreg = alloc_freg (cfg);
7472 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7473 ins->type = STACK_R8;
7475 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7476 ins->type = STACK_R8;
7477 ins->dreg = alloc_dreg (cfg, STACK_R8);
7479 MONO_ADD_INS (bblock, ins);
7489 gboolean use_aotconst = FALSE;
7491 #ifdef TARGET_POWERPC
7492 /* FIXME: Clean this up */
7493 if (cfg->compile_aot)
7494 use_aotconst = TRUE;
7497 /* FIXME: we should really allocate this only late in the compilation process */
7498 d = mono_domain_alloc (cfg->domain, sizeof (double));
7500 CHECK_STACK_OVF (1);
7506 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7508 dreg = alloc_freg (cfg);
7509 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7510 ins->type = STACK_R8;
7512 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7513 ins->type = STACK_R8;
7514 ins->dreg = alloc_dreg (cfg, STACK_R8);
7516 MONO_ADD_INS (bblock, ins);
7525 MonoInst *temp, *store;
7527 CHECK_STACK_OVF (1);
7531 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7532 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7534 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7537 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7550 if (sp [0]->type == STACK_R8)
7551 /* we need to pop the value from the x86 FP stack */
7552 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7558 INLINE_FAILURE ("jmp");
7559 GSHAREDVT_FAILURE (*ip);
7562 if (stack_start != sp)
7564 token = read32 (ip + 1);
7565 /* FIXME: check the signature matches */
7566 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7568 if (!cmethod || mono_loader_get_last_error ())
7571 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7572 GENERIC_SHARING_FAILURE (CEE_JMP);
7574 if (mono_security_cas_enabled ())
7575 CHECK_CFG_EXCEPTION;
7577 if (ARCH_HAVE_OP_TAIL_CALL) {
7578 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7581 /* Handle tail calls similarly to calls */
7582 n = fsig->param_count + fsig->hasthis;
7586 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7587 call->method = cmethod;
7588 call->tail_call = TRUE;
7589 call->signature = mono_method_signature (cmethod);
7590 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7591 call->inst.inst_p0 = cmethod;
7592 for (i = 0; i < n; ++i)
7593 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7595 mono_arch_emit_call (cfg, call);
7596 MONO_ADD_INS (bblock, (MonoInst*)call);
7598 for (i = 0; i < num_args; ++i)
7599 /* Prevent arguments from being optimized away */
7600 arg_array [i]->flags |= MONO_INST_VOLATILE;
7602 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7603 ins = (MonoInst*)call;
7604 ins->inst_p0 = cmethod;
7605 MONO_ADD_INS (bblock, ins);
7609 start_new_bblock = 1;
7614 case CEE_CALLVIRT: {
7615 MonoInst *addr = NULL;
7616 MonoMethodSignature *fsig = NULL;
7618 int virtual = *ip == CEE_CALLVIRT;
7619 int calli = *ip == CEE_CALLI;
7620 gboolean pass_imt_from_rgctx = FALSE;
7621 MonoInst *imt_arg = NULL;
7622 MonoInst *keep_this_alive = NULL;
7623 gboolean pass_vtable = FALSE;
7624 gboolean pass_mrgctx = FALSE;
7625 MonoInst *vtable_arg = NULL;
7626 gboolean check_this = FALSE;
7627 gboolean supported_tail_call = FALSE;
7628 gboolean tail_call = FALSE;
7629 gboolean need_seq_point = FALSE;
7630 guint32 call_opcode = *ip;
7631 gboolean emit_widen = TRUE;
7632 gboolean push_res = TRUE;
7633 gboolean skip_ret = FALSE;
7634 gboolean delegate_invoke = FALSE;
7637 token = read32 (ip + 1);
7642 //GSHAREDVT_FAILURE (*ip);
7647 fsig = mini_get_signature (method, token, generic_context);
7648 n = fsig->param_count + fsig->hasthis;
7650 if (method->dynamic && fsig->pinvoke) {
7654 * This is a call through a function pointer using a pinvoke
7655 * signature. Have to create a wrapper and call that instead.
7656 * FIXME: This is very slow, need to create a wrapper at JIT time
7657 * instead based on the signature.
7659 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7660 EMIT_NEW_PCONST (cfg, args [1], fsig);
7662 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7665 MonoMethod *cil_method;
7667 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7668 cil_method = cmethod;
7670 if (constrained_call) {
7671 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7672 if (cfg->verbose_level > 2)
7673 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7674 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7675 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7676 cfg->generic_sharing_context)) {
7677 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7680 if (cfg->verbose_level > 2)
7681 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7683 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7685 * This is needed since get_method_constrained can't find
7686 * the method in klass representing a type var.
7687 * The type var is guaranteed to be a reference type in this
7690 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7691 g_assert (!cmethod->klass->valuetype);
7693 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7698 if (!cmethod || mono_loader_get_last_error ())
7700 if (!dont_verify && !cfg->skip_visibility) {
7701 MonoMethod *target_method = cil_method;
7702 if (method->is_inflated) {
7703 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7705 if (!mono_method_can_access_method (method_definition, target_method) &&
7706 !mono_method_can_access_method (method, cil_method))
7707 METHOD_ACCESS_FAILURE;
7710 if (mono_security_core_clr_enabled ())
7711 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7713 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7714 /* MS.NET seems to silently convert this to a callvirt */
7719 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7720 * converts to a callvirt.
7722 * tests/bug-515884.il is an example of this behavior
7724 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7725 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7726 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7730 if (!cmethod->klass->inited)
7731 if (!mono_class_init (cmethod->klass))
7732 TYPE_LOAD_ERROR (cmethod->klass);
7734 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7735 mini_class_is_system_array (cmethod->klass)) {
7736 array_rank = cmethod->klass->rank;
7737 fsig = mono_method_signature (cmethod);
7739 fsig = mono_method_signature (cmethod);
7744 if (fsig->pinvoke) {
7745 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7746 check_for_pending_exc, cfg->compile_aot);
7747 fsig = mono_method_signature (wrapper);
7748 } else if (constrained_call) {
7749 fsig = mono_method_signature (cmethod);
7751 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7755 mono_save_token_info (cfg, image, token, cil_method);
7757 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7759 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7760 * foo (bar (), baz ())
7761 * works correctly. MS does this also:
7762 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7763 * The problem with this approach is that the debugger will stop after all calls returning a value,
7764 * even for simple cases, like:
7767 /* Special case a few common successor opcodes */
7768 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7769 need_seq_point = TRUE;
7772 n = fsig->param_count + fsig->hasthis;
7774 /* Don't support calls made using type arguments for now */
7776 if (cfg->gsharedvt) {
7777 if (mini_is_gsharedvt_signature (cfg, fsig))
7778 GSHAREDVT_FAILURE (*ip);
7782 if (mono_security_cas_enabled ()) {
7783 if (check_linkdemand (cfg, method, cmethod))
7784 INLINE_FAILURE ("linkdemand");
7785 CHECK_CFG_EXCEPTION;
7788 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7789 g_assert_not_reached ();
7792 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7795 if (!cfg->generic_sharing_context && cmethod)
7796 g_assert (!mono_method_check_context_used (cmethod));
7800 //g_assert (!virtual || fsig->hasthis);
7804 if (constrained_call) {
7805 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7807 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
7809 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7810 /* The 'Own method' case below */
7811 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7812 /* 'The type parameter is instantiated as a reference type' case below. */
7813 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7814 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7815 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7816 MonoInst *args [16];
7819 * This case handles calls to
7820 * - object:ToString()/Equals()/GetHashCode(),
7821 * - System.IComparable<T>:CompareTo()
7822 * - System.IEquatable<T>:Equals ()
7823 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7827 if (mono_method_check_context_used (cmethod))
7828 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7830 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7831 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7833 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7834 if (fsig->hasthis && fsig->param_count) {
7835 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7836 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7837 ins->dreg = alloc_preg (cfg);
7838 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7839 MONO_ADD_INS (cfg->cbb, ins);
7842 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7845 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7847 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7848 addr_reg = ins->dreg;
7849 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7851 EMIT_NEW_ICONST (cfg, args [3], 0);
7852 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7855 EMIT_NEW_ICONST (cfg, args [3], 0);
7856 EMIT_NEW_ICONST (cfg, args [4], 0);
7858 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7861 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7862 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
7863 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
7867 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7868 MONO_ADD_INS (cfg->cbb, add);
7870 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
7871 MONO_ADD_INS (cfg->cbb, ins);
7872 /* ins represents the call result */
7877 GSHAREDVT_FAILURE (*ip);
7881 * We have the `constrained.' prefix opcode.
7883 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7885 * The type parameter is instantiated as a valuetype,
7886 * but that type doesn't override the method we're
7887 * calling, so we need to box `this'.
7889 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7890 ins->klass = constrained_call;
7891 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7892 CHECK_CFG_EXCEPTION;
7893 } else if (!constrained_call->valuetype) {
7894 int dreg = alloc_ireg_ref (cfg);
7897 * The type parameter is instantiated as a reference
7898 * type. We have a managed pointer on the stack, so
7899 * we need to dereference it here.
7901 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7902 ins->type = STACK_OBJ;
7905 if (cmethod->klass->valuetype) {
7908 /* Interface method */
7911 mono_class_setup_vtable (constrained_call);
7912 CHECK_TYPELOAD (constrained_call);
7913 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7915 TYPE_LOAD_ERROR (constrained_call);
7916 slot = mono_method_get_vtable_slot (cmethod);
7918 TYPE_LOAD_ERROR (cmethod->klass);
7919 cmethod = constrained_call->vtable [ioffset + slot];
7921 if (cmethod->klass == mono_defaults.enum_class) {
7922 /* Enum implements some interfaces, so treat this as the first case */
7923 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7924 ins->klass = constrained_call;
7925 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7926 CHECK_CFG_EXCEPTION;
7931 constrained_call = NULL;
7934 if (!calli && check_call_signature (cfg, fsig, sp))
7937 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7938 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7939 delegate_invoke = TRUE;
7942 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7944 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7945 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7953 * If the callee is a shared method, then its static cctor
7954 * might not get called after the call was patched.
7956 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7957 emit_generic_class_init (cfg, cmethod->klass);
7958 CHECK_TYPELOAD (cmethod->klass);
7962 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
7964 if (cfg->generic_sharing_context && cmethod) {
7965 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7967 context_used = mini_method_check_context_used (cfg, cmethod);
7969 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7970 /* Generic method interface
7971 calls are resolved via a
7972 helper function and don't
7974 if (!cmethod_context || !cmethod_context->method_inst)
7975 pass_imt_from_rgctx = TRUE;
7979 * If a shared method calls another
7980 * shared method then the caller must
7981 * have a generic sharing context
7982 * because the magic trampoline
7983 * requires it. FIXME: We shouldn't
7984 * have to force the vtable/mrgctx
7985 * variable here. Instead there
7986 * should be a flag in the cfg to
7987 * request a generic sharing context.
7990 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7991 mono_get_vtable_var (cfg);
7996 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7998 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8000 CHECK_TYPELOAD (cmethod->klass);
8001 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8006 g_assert (!vtable_arg);
8008 if (!cfg->compile_aot) {
8010 * emit_get_rgctx_method () calls mono_class_vtable () so check
8011 * for type load errors before.
8013 mono_class_setup_vtable (cmethod->klass);
8014 CHECK_TYPELOAD (cmethod->klass);
8017 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8019 /* !marshalbyref is needed to properly handle generic methods + remoting */
8020 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8021 MONO_METHOD_IS_FINAL (cmethod)) &&
8022 !mono_class_is_marshalbyref (cmethod->klass)) {
8029 if (pass_imt_from_rgctx) {
8030 g_assert (!pass_vtable);
8033 imt_arg = emit_get_rgctx_method (cfg, context_used,
8034 cmethod, MONO_RGCTX_INFO_METHOD);
8038 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8040 /* Calling virtual generic methods */
8041 if (cmethod && virtual &&
8042 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8043 !(MONO_METHOD_IS_FINAL (cmethod) &&
8044 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8045 fsig->generic_param_count &&
8046 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8047 MonoInst *this_temp, *this_arg_temp, *store;
8048 MonoInst *iargs [4];
8049 gboolean use_imt = FALSE;
8051 g_assert (fsig->is_inflated);
8053 /* Prevent inlining of methods that contain indirect calls */
8054 INLINE_FAILURE ("virtual generic call");
8056 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8057 GSHAREDVT_FAILURE (*ip);
8059 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8060 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8065 g_assert (!imt_arg);
8067 g_assert (cmethod->is_inflated);
8068 imt_arg = emit_get_rgctx_method (cfg, context_used,
8069 cmethod, MONO_RGCTX_INFO_METHOD);
8070 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8072 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8073 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8074 MONO_ADD_INS (bblock, store);
8076 /* FIXME: This should be a managed pointer */
8077 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8079 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8080 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8081 cmethod, MONO_RGCTX_INFO_METHOD);
8082 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8083 addr = mono_emit_jit_icall (cfg,
8084 mono_helper_compile_generic_method, iargs);
8086 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8088 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8095 * Implement a workaround for the inherent races involved in locking:
8101 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8102 * try block, the Exit () won't be executed, see:
8103 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8104 * To work around this, we extend such try blocks to include the last x bytes
8105 * of the Monitor.Enter () call.
8107 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8108 MonoBasicBlock *tbb;
8110 GET_BBLOCK (cfg, tbb, ip + 5);
8112 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8113 * from Monitor.Enter like ArgumentNullException.
8115 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8116 /* Mark this bblock as needing to be extended */
8117 tbb->extend_try_block = TRUE;
8121 /* Conversion to a JIT intrinsic */
8122 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8124 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8125 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8132 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8133 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8134 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8135 !g_list_find (dont_inline, cmethod)) {
8137 gboolean always = FALSE;
8139 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8140 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8141 /* Prevent inlining of methods that call wrappers */
8142 INLINE_FAILURE ("wrapper call");
8143 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8147 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8149 cfg->real_offset += 5;
8152 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8153 /* *sp is already set by inline_method */
8158 inline_costs += costs;
8164 /* Tail recursion elimination */
8165 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8166 gboolean has_vtargs = FALSE;
8169 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8170 INLINE_FAILURE ("tail call");
8172 /* keep it simple */
8173 for (i = fsig->param_count - 1; i >= 0; i--) {
8174 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8179 for (i = 0; i < n; ++i)
8180 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8181 MONO_INST_NEW (cfg, ins, OP_BR);
8182 MONO_ADD_INS (bblock, ins);
8183 tblock = start_bblock->out_bb [0];
8184 link_bblock (cfg, bblock, tblock);
8185 ins->inst_target_bb = tblock;
8186 start_new_bblock = 1;
8188 /* skip the CEE_RET, too */
8189 if (ip_in_bb (cfg, bblock, ip + 5))
8196 inline_costs += 10 * num_calls++;
8199 * Making generic calls out of gsharedvt methods.
8201 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8202 MonoRgctxInfoType info_type;
8205 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8206 //GSHAREDVT_FAILURE (*ip);
8207 // disable for possible remoting calls
8208 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8209 GSHAREDVT_FAILURE (*ip);
8210 if (fsig->generic_param_count) {
8211 /* virtual generic call */
8212 g_assert (mono_use_imt);
8213 g_assert (!imt_arg);
8214 /* Same as the virtual generic case above */
8215 imt_arg = emit_get_rgctx_method (cfg, context_used,
8216 cmethod, MONO_RGCTX_INFO_METHOD);
8217 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8222 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8223 /* test_0_multi_dim_arrays () in gshared.cs */
8224 GSHAREDVT_FAILURE (*ip);
8226 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8227 keep_this_alive = sp [0];
8229 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8230 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8232 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8233 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8235 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8237 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8239 * We pass the address to the gsharedvt trampoline in the rgctx reg
8241 MonoInst *callee = addr;
8243 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8245 GSHAREDVT_FAILURE (*ip);
8247 addr = emit_get_rgctx_sig (cfg, context_used,
8248 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8249 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8253 /* Generic sharing */
8254 /* FIXME: only do this for generic methods if
8255 they are not shared! */
8256 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8257 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8258 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8259 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8260 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8261 INLINE_FAILURE ("gshared");
8263 g_assert (cfg->generic_sharing_context && cmethod);
8267 * We are compiling a call to a
8268 * generic method from shared code,
8269 * which means that we have to look up
8270 * the method in the rgctx and do an
8274 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8276 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8277 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8281 /* Indirect calls */
8283 if (call_opcode == CEE_CALL)
8284 g_assert (context_used);
8285 else if (call_opcode == CEE_CALLI)
8286 g_assert (!vtable_arg);
8288 /* FIXME: what the hell is this??? */
8289 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8290 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8292 /* Prevent inlining of methods with indirect calls */
8293 INLINE_FAILURE ("indirect call");
8295 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8300 * Instead of emitting an indirect call, emit a direct call
8301 * with the contents of the aotconst as the patch info.
8303 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8304 info_type = addr->inst_c1;
8305 info_data = addr->inst_p0;
8307 info_type = addr->inst_right->inst_c1;
8308 info_data = addr->inst_right->inst_left;
8311 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8312 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8317 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8325 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8326 MonoInst *val = sp [fsig->param_count];
8328 if (val->type == STACK_OBJ) {
8329 MonoInst *iargs [2];
8334 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8337 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8338 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8339 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8340 emit_write_barrier (cfg, addr, val);
8341 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8342 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8344 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8345 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8346 if (!cmethod->klass->element_class->valuetype && !readonly)
8347 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8348 CHECK_TYPELOAD (cmethod->klass);
8351 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8354 g_assert_not_reached ();
8361 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8365 /* Tail prefix / tail call optimization */
8367 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8368 /* FIXME: runtime generic context pointer for jumps? */
8369 /* FIXME: handle this for generic sharing eventually */
8370 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8371 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8372 supported_tail_call = TRUE;
8374 if (supported_tail_call) {
8377 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8378 INLINE_FAILURE ("tail call");
8380 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8382 if (ARCH_HAVE_OP_TAIL_CALL) {
8383 /* Handle tail calls similarly to normal calls */
8386 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8387 call->tail_call = TRUE;
8388 call->method = cmethod;
8389 call->signature = mono_method_signature (cmethod);
8392 * We implement tail calls by storing the actual arguments into the
8393 * argument variables, then emitting a CEE_JMP.
8395 for (i = 0; i < n; ++i) {
8396 /* Prevent argument from being register allocated */
8397 arg_array [i]->flags |= MONO_INST_VOLATILE;
8398 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8400 ins = (MonoInst*)call;
8401 ins->inst_p0 = cmethod;
8402 ins->inst_p1 = arg_array [0];
8403 MONO_ADD_INS (bblock, ins);
8404 link_bblock (cfg, bblock, end_bblock);
8405 start_new_bblock = 1;
8407 // FIXME: Eliminate unreachable epilogs
8410 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8411 * only reachable from this call.
8413 GET_BBLOCK (cfg, tblock, ip + 5);
8414 if (tblock == bblock || tblock->in_count == 0)
8423 * Synchronized wrappers.
8424 * Its hard to determine where to replace a method with its synchronized
8425 * wrapper without causing an infinite recursion. The current solution is
8426 * to add the synchronized wrapper in the trampolines, and to
8427 * change the called method to a dummy wrapper, and resolve that wrapper
8428 * to the real method in mono_jit_compile_method ().
8430 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8431 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8432 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8433 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8437 INLINE_FAILURE ("call");
8438 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8439 imt_arg, vtable_arg);
8442 link_bblock (cfg, bblock, end_bblock);
8443 start_new_bblock = 1;
8445 // FIXME: Eliminate unreachable epilogs
8448 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8449 * only reachable from this call.
8451 GET_BBLOCK (cfg, tblock, ip + 5);
8452 if (tblock == bblock || tblock->in_count == 0)
8459 /* End of call, INS should contain the result of the call, if any */
8461 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8464 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8469 if (keep_this_alive) {
8470 MonoInst *dummy_use;
8472 /* See mono_emit_method_call_full () */
8473 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8476 CHECK_CFG_EXCEPTION;
8480 g_assert (*ip == CEE_RET);
8484 constrained_call = NULL;
8486 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8490 if (cfg->method != method) {
8491 /* return from inlined method */
8493 * If in_count == 0, that means the ret is unreachable due to
8494 * being preceeded by a throw. In that case, inline_method () will
8495 * handle setting the return value
8496 * (test case: test_0_inline_throw ()).
8498 if (return_var && cfg->cbb->in_count) {
8499 MonoType *ret_type = mono_method_signature (method)->ret;
8505 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8508 //g_assert (returnvar != -1);
8509 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8510 cfg->ret_var_set = TRUE;
8513 if (cfg->lmf_var && cfg->cbb->in_count)
8517 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8519 if (seq_points && !sym_seq_points) {
8521 * Place a seq point here too even through the IL stack is not
8522 * empty, so a step over on
8525 * will work correctly.
8527 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8528 MONO_ADD_INS (cfg->cbb, ins);
8531 g_assert (!return_var);
8535 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8538 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8541 if (!cfg->vret_addr) {
8544 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8546 EMIT_NEW_RETLOADA (cfg, ret_addr);
8548 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8549 ins->klass = mono_class_from_mono_type (ret_type);
8552 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8553 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8554 MonoInst *iargs [1];
8558 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8559 mono_arch_emit_setret (cfg, method, conv);
8561 mono_arch_emit_setret (cfg, method, *sp);
8564 mono_arch_emit_setret (cfg, method, *sp);
8569 if (sp != stack_start)
8571 MONO_INST_NEW (cfg, ins, OP_BR);
8573 ins->inst_target_bb = end_bblock;
8574 MONO_ADD_INS (bblock, ins);
8575 link_bblock (cfg, bblock, end_bblock);
8576 start_new_bblock = 1;
8580 MONO_INST_NEW (cfg, ins, OP_BR);
8582 target = ip + 1 + (signed char)(*ip);
8584 GET_BBLOCK (cfg, tblock, target);
8585 link_bblock (cfg, bblock, tblock);
8586 ins->inst_target_bb = tblock;
8587 if (sp != stack_start) {
8588 handle_stack_args (cfg, stack_start, sp - stack_start);
8590 CHECK_UNVERIFIABLE (cfg);
8592 MONO_ADD_INS (bblock, ins);
8593 start_new_bblock = 1;
8594 inline_costs += BRANCH_COST;
8608 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8610 target = ip + 1 + *(signed char*)ip;
8616 inline_costs += BRANCH_COST;
8620 MONO_INST_NEW (cfg, ins, OP_BR);
8623 target = ip + 4 + (gint32)read32(ip);
8625 GET_BBLOCK (cfg, tblock, target);
8626 link_bblock (cfg, bblock, tblock);
8627 ins->inst_target_bb = tblock;
8628 if (sp != stack_start) {
8629 handle_stack_args (cfg, stack_start, sp - stack_start);
8631 CHECK_UNVERIFIABLE (cfg);
8634 MONO_ADD_INS (bblock, ins);
8636 start_new_bblock = 1;
8637 inline_costs += BRANCH_COST;
8644 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8645 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8646 guint32 opsize = is_short ? 1 : 4;
8648 CHECK_OPSIZE (opsize);
8650 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8653 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8658 GET_BBLOCK (cfg, tblock, target);
8659 link_bblock (cfg, bblock, tblock);
8660 GET_BBLOCK (cfg, tblock, ip);
8661 link_bblock (cfg, bblock, tblock);
8663 if (sp != stack_start) {
8664 handle_stack_args (cfg, stack_start, sp - stack_start);
8665 CHECK_UNVERIFIABLE (cfg);
8668 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8669 cmp->sreg1 = sp [0]->dreg;
8670 type_from_op (cmp, sp [0], NULL);
8673 #if SIZEOF_REGISTER == 4
8674 if (cmp->opcode == OP_LCOMPARE_IMM) {
8675 /* Convert it to OP_LCOMPARE */
8676 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8677 ins->type = STACK_I8;
8678 ins->dreg = alloc_dreg (cfg, STACK_I8);
8680 MONO_ADD_INS (bblock, ins);
8681 cmp->opcode = OP_LCOMPARE;
8682 cmp->sreg2 = ins->dreg;
8685 MONO_ADD_INS (bblock, cmp);
8687 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8688 type_from_op (ins, sp [0], NULL);
8689 MONO_ADD_INS (bblock, ins);
8690 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8691 GET_BBLOCK (cfg, tblock, target);
8692 ins->inst_true_bb = tblock;
8693 GET_BBLOCK (cfg, tblock, ip);
8694 ins->inst_false_bb = tblock;
8695 start_new_bblock = 2;
8698 inline_costs += BRANCH_COST;
8713 MONO_INST_NEW (cfg, ins, *ip);
8715 target = ip + 4 + (gint32)read32(ip);
8721 inline_costs += BRANCH_COST;
8725 MonoBasicBlock **targets;
8726 MonoBasicBlock *default_bblock;
8727 MonoJumpInfoBBTable *table;
8728 int offset_reg = alloc_preg (cfg);
8729 int target_reg = alloc_preg (cfg);
8730 int table_reg = alloc_preg (cfg);
8731 int sum_reg = alloc_preg (cfg);
8732 gboolean use_op_switch;
8736 n = read32 (ip + 1);
8739 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8743 CHECK_OPSIZE (n * sizeof (guint32));
8744 target = ip + n * sizeof (guint32);
8746 GET_BBLOCK (cfg, default_bblock, target);
8747 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8749 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8750 for (i = 0; i < n; ++i) {
8751 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8752 targets [i] = tblock;
8753 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8757 if (sp != stack_start) {
8759 * Link the current bb with the targets as well, so handle_stack_args
8760 * will set their in_stack correctly.
8762 link_bblock (cfg, bblock, default_bblock);
8763 for (i = 0; i < n; ++i)
8764 link_bblock (cfg, bblock, targets [i]);
8766 handle_stack_args (cfg, stack_start, sp - stack_start);
8768 CHECK_UNVERIFIABLE (cfg);
8771 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8772 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8775 for (i = 0; i < n; ++i)
8776 link_bblock (cfg, bblock, targets [i]);
8778 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8779 table->table = targets;
8780 table->table_size = n;
8782 use_op_switch = FALSE;
8784 /* ARM implements SWITCH statements differently */
8785 /* FIXME: Make it use the generic implementation */
8786 if (!cfg->compile_aot)
8787 use_op_switch = TRUE;
8790 if (COMPILE_LLVM (cfg))
8791 use_op_switch = TRUE;
8793 cfg->cbb->has_jump_table = 1;
8795 if (use_op_switch) {
8796 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8797 ins->sreg1 = src1->dreg;
8798 ins->inst_p0 = table;
8799 ins->inst_many_bb = targets;
8800 ins->klass = GUINT_TO_POINTER (n);
8801 MONO_ADD_INS (cfg->cbb, ins);
8803 if (sizeof (gpointer) == 8)
8804 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8806 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8808 #if SIZEOF_REGISTER == 8
8809 /* The upper word might not be zero, and we add it to a 64 bit address later */
8810 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8813 if (cfg->compile_aot) {
8814 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8816 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8817 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8818 ins->inst_p0 = table;
8819 ins->dreg = table_reg;
8820 MONO_ADD_INS (cfg->cbb, ins);
8823 /* FIXME: Use load_memindex */
8824 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8826 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8828 start_new_bblock = 1;
8829 inline_costs += (BRANCH_COST * 2);
8849 dreg = alloc_freg (cfg);
8852 dreg = alloc_lreg (cfg);
8855 dreg = alloc_ireg_ref (cfg);
8858 dreg = alloc_preg (cfg);
8861 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8862 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8863 ins->flags |= ins_flag;
8865 MONO_ADD_INS (bblock, ins);
8867 if (ins->flags & MONO_INST_VOLATILE) {
8868 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8869 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8870 emit_memory_barrier (cfg, FullBarrier);
8885 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8886 ins->flags |= ins_flag;
8889 if (ins->flags & MONO_INST_VOLATILE) {
8890 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8891 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8892 emit_memory_barrier (cfg, FullBarrier);
8895 MONO_ADD_INS (bblock, ins);
8897 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8898 emit_write_barrier (cfg, sp [0], sp [1]);
8907 MONO_INST_NEW (cfg, ins, (*ip));
8909 ins->sreg1 = sp [0]->dreg;
8910 ins->sreg2 = sp [1]->dreg;
8911 type_from_op (ins, sp [0], sp [1]);
8913 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8915 /* Use the immediate opcodes if possible */
8916 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8917 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8918 if (imm_opcode != -1) {
8919 ins->opcode = imm_opcode;
8920 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8923 sp [1]->opcode = OP_NOP;
8927 MONO_ADD_INS ((cfg)->cbb, (ins));
8929 *sp++ = mono_decompose_opcode (cfg, ins);
8946 MONO_INST_NEW (cfg, ins, (*ip));
8948 ins->sreg1 = sp [0]->dreg;
8949 ins->sreg2 = sp [1]->dreg;
8950 type_from_op (ins, sp [0], sp [1]);
8952 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8953 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8955 /* FIXME: Pass opcode to is_inst_imm */
8957 /* Use the immediate opcodes if possible */
8958 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8961 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8962 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
8963 /* Keep emulated opcodes which are optimized away later */
8964 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
8965 imm_opcode = mono_op_to_op_imm (ins->opcode);
8968 if (imm_opcode != -1) {
8969 ins->opcode = imm_opcode;
8970 if (sp [1]->opcode == OP_I8CONST) {
8971 #if SIZEOF_REGISTER == 8
8972 ins->inst_imm = sp [1]->inst_l;
8974 ins->inst_ls_word = sp [1]->inst_ls_word;
8975 ins->inst_ms_word = sp [1]->inst_ms_word;
8979 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8982 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8983 if (sp [1]->next == NULL)
8984 sp [1]->opcode = OP_NOP;
8987 MONO_ADD_INS ((cfg)->cbb, (ins));
8989 *sp++ = mono_decompose_opcode (cfg, ins);
9002 case CEE_CONV_OVF_I8:
9003 case CEE_CONV_OVF_U8:
9007 /* Special case this earlier so we have long constants in the IR */
9008 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9009 int data = sp [-1]->inst_c0;
9010 sp [-1]->opcode = OP_I8CONST;
9011 sp [-1]->type = STACK_I8;
9012 #if SIZEOF_REGISTER == 8
9013 if ((*ip) == CEE_CONV_U8)
9014 sp [-1]->inst_c0 = (guint32)data;
9016 sp [-1]->inst_c0 = data;
9018 sp [-1]->inst_ls_word = data;
9019 if ((*ip) == CEE_CONV_U8)
9020 sp [-1]->inst_ms_word = 0;
9022 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9024 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9031 case CEE_CONV_OVF_I4:
9032 case CEE_CONV_OVF_I1:
9033 case CEE_CONV_OVF_I2:
9034 case CEE_CONV_OVF_I:
9035 case CEE_CONV_OVF_U:
9038 if (sp [-1]->type == STACK_R8) {
9039 ADD_UNOP (CEE_CONV_OVF_I8);
9046 case CEE_CONV_OVF_U1:
9047 case CEE_CONV_OVF_U2:
9048 case CEE_CONV_OVF_U4:
9051 if (sp [-1]->type == STACK_R8) {
9052 ADD_UNOP (CEE_CONV_OVF_U8);
9059 case CEE_CONV_OVF_I1_UN:
9060 case CEE_CONV_OVF_I2_UN:
9061 case CEE_CONV_OVF_I4_UN:
9062 case CEE_CONV_OVF_I8_UN:
9063 case CEE_CONV_OVF_U1_UN:
9064 case CEE_CONV_OVF_U2_UN:
9065 case CEE_CONV_OVF_U4_UN:
9066 case CEE_CONV_OVF_U8_UN:
9067 case CEE_CONV_OVF_I_UN:
9068 case CEE_CONV_OVF_U_UN:
9075 CHECK_CFG_EXCEPTION;
9079 case CEE_ADD_OVF_UN:
9081 case CEE_MUL_OVF_UN:
9083 case CEE_SUB_OVF_UN:
9089 GSHAREDVT_FAILURE (*ip);
9092 token = read32 (ip + 1);
9093 klass = mini_get_class (method, token, generic_context);
9094 CHECK_TYPELOAD (klass);
9096 if (generic_class_is_reference_type (cfg, klass)) {
9097 MonoInst *store, *load;
9098 int dreg = alloc_ireg_ref (cfg);
9100 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9101 load->flags |= ins_flag;
9102 MONO_ADD_INS (cfg->cbb, load);
9104 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9105 store->flags |= ins_flag;
9106 MONO_ADD_INS (cfg->cbb, store);
9108 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9109 emit_write_barrier (cfg, sp [0], sp [1]);
9111 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9123 token = read32 (ip + 1);
9124 klass = mini_get_class (method, token, generic_context);
9125 CHECK_TYPELOAD (klass);
9127 /* Optimize the common ldobj+stloc combination */
9137 loc_index = ip [5] - CEE_STLOC_0;
9144 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9145 CHECK_LOCAL (loc_index);
9147 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9148 ins->dreg = cfg->locals [loc_index]->dreg;
9154 /* Optimize the ldobj+stobj combination */
9155 /* The reference case ends up being a load+store anyway */
9156 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9161 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9168 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9177 CHECK_STACK_OVF (1);
9179 n = read32 (ip + 1);
9181 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9182 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9183 ins->type = STACK_OBJ;
9186 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9187 MonoInst *iargs [1];
9189 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9190 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9192 if (cfg->opt & MONO_OPT_SHARED) {
9193 MonoInst *iargs [3];
9195 if (cfg->compile_aot) {
9196 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9198 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9199 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9200 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9201 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9202 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9204 if (bblock->out_of_line) {
9205 MonoInst *iargs [2];
9207 if (image == mono_defaults.corlib) {
9209 * Avoid relocations in AOT and save some space by using a
9210 * version of helper_ldstr specialized to mscorlib.
9212 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9213 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9215 /* Avoid creating the string object */
9216 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9217 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9218 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9222 if (cfg->compile_aot) {
9223 NEW_LDSTRCONST (cfg, ins, image, n);
9225 MONO_ADD_INS (bblock, ins);
9228 NEW_PCONST (cfg, ins, NULL);
9229 ins->type = STACK_OBJ;
9230 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9232 OUT_OF_MEMORY_FAILURE;
9235 MONO_ADD_INS (bblock, ins);
9244 MonoInst *iargs [2];
9245 MonoMethodSignature *fsig;
9248 MonoInst *vtable_arg = NULL;
9251 token = read32 (ip + 1);
9252 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9253 if (!cmethod || mono_loader_get_last_error ())
9255 fsig = mono_method_get_signature (cmethod, image, token);
9259 mono_save_token_info (cfg, image, token, cmethod);
9261 if (!mono_class_init (cmethod->klass))
9262 TYPE_LOAD_ERROR (cmethod->klass);
9264 context_used = mini_method_check_context_used (cfg, cmethod);
9266 if (mono_security_cas_enabled ()) {
9267 if (check_linkdemand (cfg, method, cmethod))
9268 INLINE_FAILURE ("linkdemand");
9269 CHECK_CFG_EXCEPTION;
9270 } else if (mono_security_core_clr_enabled ()) {
9271 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9274 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9275 emit_generic_class_init (cfg, cmethod->klass);
9276 CHECK_TYPELOAD (cmethod->klass);
9280 if (cfg->gsharedvt) {
9281 if (mini_is_gsharedvt_variable_signature (sig))
9282 GSHAREDVT_FAILURE (*ip);
9286 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9287 mono_method_is_generic_sharable (cmethod, TRUE)) {
9288 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9289 mono_class_vtable (cfg->domain, cmethod->klass);
9290 CHECK_TYPELOAD (cmethod->klass);
9292 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9293 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9296 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9297 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9299 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9301 CHECK_TYPELOAD (cmethod->klass);
9302 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9307 n = fsig->param_count;
9311 * Generate smaller code for the common newobj <exception> instruction in
9312 * argument checking code.
9314 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9315 is_exception_class (cmethod->klass) && n <= 2 &&
9316 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9317 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9318 MonoInst *iargs [3];
9320 g_assert (!vtable_arg);
9324 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9327 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9331 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9336 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9339 g_assert_not_reached ();
9347 /* move the args to allow room for 'this' in the first position */
9353 /* check_call_signature () requires sp[0] to be set */
9354 this_ins.type = STACK_OBJ;
9356 if (check_call_signature (cfg, fsig, sp))
9361 if (mini_class_is_system_array (cmethod->klass)) {
9362 g_assert (!vtable_arg);
9364 *sp = emit_get_rgctx_method (cfg, context_used,
9365 cmethod, MONO_RGCTX_INFO_METHOD);
9367 /* Avoid varargs in the common case */
9368 if (fsig->param_count == 1)
9369 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9370 else if (fsig->param_count == 2)
9371 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9372 else if (fsig->param_count == 3)
9373 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9374 else if (fsig->param_count == 4)
9375 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9377 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9378 } else if (cmethod->string_ctor) {
9379 g_assert (!context_used);
9380 g_assert (!vtable_arg);
9381 /* we simply pass a null pointer */
9382 EMIT_NEW_PCONST (cfg, *sp, NULL);
9383 /* now call the string ctor */
9384 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9386 MonoInst* callvirt_this_arg = NULL;
9388 if (cmethod->klass->valuetype) {
9389 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9390 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9391 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9396 * The code generated by mini_emit_virtual_call () expects
9397 * iargs [0] to be a boxed instance, but luckily the vcall
9398 * will be transformed into a normal call there.
9400 } else if (context_used) {
9401 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9404 MonoVTable *vtable = NULL;
9406 if (!cfg->compile_aot)
9407 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9408 CHECK_TYPELOAD (cmethod->klass);
9411 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9412 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9413 * As a workaround, we call class cctors before allocating objects.
9415 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9416 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9417 if (cfg->verbose_level > 2)
9418 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9419 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9422 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9425 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9428 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9430 /* Now call the actual ctor */
9431 /* Avoid virtual calls to ctors if possible */
9432 if (mono_class_is_marshalbyref (cmethod->klass))
9433 callvirt_this_arg = sp [0];
9436 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9437 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9438 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9443 CHECK_CFG_EXCEPTION;
9444 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9445 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9446 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9447 !g_list_find (dont_inline, cmethod)) {
9450 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9451 cfg->real_offset += 5;
9454 inline_costs += costs - 5;
9456 INLINE_FAILURE ("inline failure");
9457 // FIXME-VT: Clean this up
9458 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9459 GSHAREDVT_FAILURE(*ip);
9460 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9462 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9465 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9466 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9467 } else if (context_used &&
9468 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9469 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9470 MonoInst *cmethod_addr;
9472 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9473 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9475 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9477 INLINE_FAILURE ("ctor call");
9478 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9479 callvirt_this_arg, NULL, vtable_arg);
9483 if (alloc == NULL) {
9485 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9486 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9500 token = read32 (ip + 1);
9501 klass = mini_get_class (method, token, generic_context);
9502 CHECK_TYPELOAD (klass);
9503 if (sp [0]->type != STACK_OBJ)
9506 context_used = mini_class_check_context_used (cfg, klass);
9508 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9509 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9516 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9519 if (cfg->compile_aot)
9520 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9522 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9524 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9526 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9527 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9528 reset_cast_details (cfg);
9531 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9532 MonoMethod *mono_castclass;
9533 MonoInst *iargs [1];
9536 mono_castclass = mono_marshal_get_castclass (klass);
9539 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9540 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9541 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9542 reset_cast_details (cfg);
9543 CHECK_CFG_EXCEPTION;
9544 g_assert (costs > 0);
9547 cfg->real_offset += 5;
9552 inline_costs += costs;
9555 ins = handle_castclass (cfg, klass, *sp, context_used);
9556 CHECK_CFG_EXCEPTION;
9566 token = read32 (ip + 1);
9567 klass = mini_get_class (method, token, generic_context);
9568 CHECK_TYPELOAD (klass);
9569 if (sp [0]->type != STACK_OBJ)
9572 context_used = mini_class_check_context_used (cfg, klass);
9574 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9575 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9582 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9585 if (cfg->compile_aot)
9586 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9588 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9590 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9593 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9594 MonoMethod *mono_isinst;
9595 MonoInst *iargs [1];
9598 mono_isinst = mono_marshal_get_isinst (klass);
9601 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9602 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9603 CHECK_CFG_EXCEPTION;
9604 g_assert (costs > 0);
9607 cfg->real_offset += 5;
9612 inline_costs += costs;
9615 ins = handle_isinst (cfg, klass, *sp, context_used);
9616 CHECK_CFG_EXCEPTION;
9623 case CEE_UNBOX_ANY: {
9627 token = read32 (ip + 1);
9628 klass = mini_get_class (method, token, generic_context);
9629 CHECK_TYPELOAD (klass);
9631 mono_save_token_info (cfg, image, token, klass);
9633 context_used = mini_class_check_context_used (cfg, klass);
9635 if (mini_is_gsharedvt_klass (cfg, klass)) {
9636 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9644 if (generic_class_is_reference_type (cfg, klass)) {
9645 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9646 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9647 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9654 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9657 /*FIXME AOT support*/
9658 if (cfg->compile_aot)
9659 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9661 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9663 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9664 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9667 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9668 MonoMethod *mono_castclass;
9669 MonoInst *iargs [1];
9672 mono_castclass = mono_marshal_get_castclass (klass);
9675 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9676 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9677 CHECK_CFG_EXCEPTION;
9678 g_assert (costs > 0);
9681 cfg->real_offset += 5;
9685 inline_costs += costs;
9687 ins = handle_castclass (cfg, klass, *sp, context_used);
9688 CHECK_CFG_EXCEPTION;
9696 if (mono_class_is_nullable (klass)) {
9697 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9704 ins = handle_unbox (cfg, klass, sp, context_used);
9710 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9723 token = read32 (ip + 1);
9724 klass = mini_get_class (method, token, generic_context);
9725 CHECK_TYPELOAD (klass);
9727 mono_save_token_info (cfg, image, token, klass);
9729 context_used = mini_class_check_context_used (cfg, klass);
9731 if (generic_class_is_reference_type (cfg, klass)) {
9737 if (klass == mono_defaults.void_class)
9739 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9741 /* frequent check in generic code: box (struct), brtrue */
9743 // FIXME: LLVM can't handle the inconsistent bb linking
9744 if (!mono_class_is_nullable (klass) &&
9745 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9746 (ip [5] == CEE_BRTRUE ||
9747 ip [5] == CEE_BRTRUE_S ||
9748 ip [5] == CEE_BRFALSE ||
9749 ip [5] == CEE_BRFALSE_S)) {
9750 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9752 MonoBasicBlock *true_bb, *false_bb;
9756 if (cfg->verbose_level > 3) {
9757 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9758 printf ("<box+brtrue opt>\n");
9766 target = ip + 1 + (signed char)(*ip);
9773 target = ip + 4 + (gint)(read32 (ip));
9777 g_assert_not_reached ();
9781 * We need to link both bblocks, since it is needed for handling stack
9782 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9783 * Branching to only one of them would lead to inconsistencies, so
9784 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9786 GET_BBLOCK (cfg, true_bb, target);
9787 GET_BBLOCK (cfg, false_bb, ip);
9789 mono_link_bblock (cfg, cfg->cbb, true_bb);
9790 mono_link_bblock (cfg, cfg->cbb, false_bb);
9792 if (sp != stack_start) {
9793 handle_stack_args (cfg, stack_start, sp - stack_start);
9795 CHECK_UNVERIFIABLE (cfg);
9798 if (COMPILE_LLVM (cfg)) {
9799 dreg = alloc_ireg (cfg);
9800 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9803 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9805 /* The JIT can't eliminate the iconst+compare */
9806 MONO_INST_NEW (cfg, ins, OP_BR);
9807 ins->inst_target_bb = is_true ? true_bb : false_bb;
9808 MONO_ADD_INS (cfg->cbb, ins);
9811 start_new_bblock = 1;
9815 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9817 CHECK_CFG_EXCEPTION;
9826 token = read32 (ip + 1);
9827 klass = mini_get_class (method, token, generic_context);
9828 CHECK_TYPELOAD (klass);
9830 mono_save_token_info (cfg, image, token, klass);
9832 context_used = mini_class_check_context_used (cfg, klass);
9834 if (mono_class_is_nullable (klass)) {
9837 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9838 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9842 ins = handle_unbox (cfg, klass, sp, context_used);
9855 MonoClassField *field;
9856 #ifndef DISABLE_REMOTING
9860 gboolean is_instance;
9862 gpointer addr = NULL;
9863 gboolean is_special_static;
9865 MonoInst *store_val = NULL;
9866 MonoInst *thread_ins;
9869 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9871 if (op == CEE_STFLD) {
9879 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9881 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9884 if (op == CEE_STSFLD) {
9892 token = read32 (ip + 1);
9893 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9894 field = mono_method_get_wrapper_data (method, token);
9895 klass = field->parent;
9898 field = mono_field_from_token (image, token, &klass, generic_context);
9902 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9903 FIELD_ACCESS_FAILURE;
9904 mono_class_init (klass);
9906 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9909 /* if the class is Critical then transparent code cannot access it's fields */
9910 if (!is_instance && mono_security_core_clr_enabled ())
9911 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9913 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9914 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9915 if (mono_security_core_clr_enabled ())
9916 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9920 * LDFLD etc. is usable on static fields as well, so convert those cases to
9923 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9935 g_assert_not_reached ();
9937 is_instance = FALSE;
9940 context_used = mini_class_check_context_used (cfg, klass);
9944 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9945 if (op == CEE_STFLD) {
9946 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9948 #ifndef DISABLE_REMOTING
9949 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9950 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9951 MonoInst *iargs [5];
9953 GSHAREDVT_FAILURE (op);
9956 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9957 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9958 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9962 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9963 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9964 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9965 CHECK_CFG_EXCEPTION;
9966 g_assert (costs > 0);
9968 cfg->real_offset += 5;
9971 inline_costs += costs;
9973 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9980 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9982 if (mini_is_gsharedvt_klass (cfg, klass)) {
9983 MonoInst *offset_ins;
9985 context_used = mini_class_check_context_used (cfg, klass);
9987 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9988 dreg = alloc_ireg_mp (cfg);
9989 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9990 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
9991 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9993 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9995 if (sp [0]->opcode != OP_LDADDR)
9996 store->flags |= MONO_INST_FAULT;
9998 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9999 /* insert call to write barrier */
10003 dreg = alloc_ireg_mp (cfg);
10004 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10005 emit_write_barrier (cfg, ptr, sp [1]);
10008 store->flags |= ins_flag;
10015 #ifndef DISABLE_REMOTING
10016 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10017 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10018 MonoInst *iargs [4];
10020 GSHAREDVT_FAILURE (op);
10022 iargs [0] = sp [0];
10023 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10024 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10025 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10026 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10027 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10028 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10029 CHECK_CFG_EXCEPTION;
10031 g_assert (costs > 0);
10033 cfg->real_offset += 5;
10037 inline_costs += costs;
10039 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10045 if (sp [0]->type == STACK_VTYPE) {
10048 /* Have to compute the address of the variable */
10050 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10052 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10054 g_assert (var->klass == klass);
10056 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10060 if (op == CEE_LDFLDA) {
10061 if (is_magic_tls_access (field)) {
10062 GSHAREDVT_FAILURE (*ip);
10064 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10066 if (sp [0]->type == STACK_OBJ) {
10067 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10068 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10071 dreg = alloc_ireg_mp (cfg);
10073 if (mini_is_gsharedvt_klass (cfg, klass)) {
10074 MonoInst *offset_ins;
10076 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10077 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10079 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10081 ins->klass = mono_class_from_mono_type (field->type);
10082 ins->type = STACK_MP;
10088 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10090 if (mini_is_gsharedvt_klass (cfg, klass)) {
10091 MonoInst *offset_ins;
10093 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10094 dreg = alloc_ireg_mp (cfg);
10095 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10096 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10098 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10100 load->flags |= ins_flag;
10101 if (sp [0]->opcode != OP_LDADDR)
10102 load->flags |= MONO_INST_FAULT;
10116 * We can only support shared generic static
10117 * field access on architectures where the
10118 * trampoline code has been extended to handle
10119 * the generic class init.
10121 #ifndef MONO_ARCH_VTABLE_REG
10122 GENERIC_SHARING_FAILURE (op);
10125 context_used = mini_class_check_context_used (cfg, klass);
10127 ftype = mono_field_get_type (field);
10129 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10132 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10133 * to be called here.
10135 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10136 mono_class_vtable (cfg->domain, klass);
10137 CHECK_TYPELOAD (klass);
10139 mono_domain_lock (cfg->domain);
10140 if (cfg->domain->special_static_fields)
10141 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10142 mono_domain_unlock (cfg->domain);
10144 is_special_static = mono_class_field_is_special_static (field);
10146 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10147 thread_ins = mono_get_thread_intrinsic (cfg);
10151 /* Generate IR to compute the field address */
10152 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10154 * Fast access to TLS data
10155 * Inline version of get_thread_static_data () in
10159 int idx, static_data_reg, array_reg, dreg;
10161 GSHAREDVT_FAILURE (op);
10163 // offset &= 0x7fffffff;
10164 // idx = (offset >> 24) - 1;
10165 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10166 MONO_ADD_INS (cfg->cbb, thread_ins);
10167 static_data_reg = alloc_ireg (cfg);
10168 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10170 if (cfg->compile_aot) {
10171 int offset_reg, offset2_reg, idx_reg;
10173 /* For TLS variables, this will return the TLS offset */
10174 EMIT_NEW_SFLDACONST (cfg, ins, field);
10175 offset_reg = ins->dreg;
10176 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10177 idx_reg = alloc_ireg (cfg);
10178 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10179 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10180 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10181 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10182 array_reg = alloc_ireg (cfg);
10183 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10184 offset2_reg = alloc_ireg (cfg);
10185 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10186 dreg = alloc_ireg (cfg);
10187 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10189 offset = (gsize)addr & 0x7fffffff;
10190 idx = (offset >> 24) - 1;
10192 array_reg = alloc_ireg (cfg);
10193 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10194 dreg = alloc_ireg (cfg);
10195 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10197 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10198 (cfg->compile_aot && is_special_static) ||
10199 (context_used && is_special_static)) {
10200 MonoInst *iargs [2];
10202 g_assert (field->parent);
10203 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10204 if (context_used) {
10205 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10206 field, MONO_RGCTX_INFO_CLASS_FIELD);
10208 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10210 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10211 } else if (context_used) {
10212 MonoInst *static_data;
10215 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10216 method->klass->name_space, method->klass->name, method->name,
10217 depth, field->offset);
10220 if (mono_class_needs_cctor_run (klass, method))
10221 emit_generic_class_init (cfg, klass);
10224 * The pointer we're computing here is
10226 * super_info.static_data + field->offset
10228 static_data = emit_get_rgctx_klass (cfg, context_used,
10229 klass, MONO_RGCTX_INFO_STATIC_DATA);
10231 if (mini_is_gsharedvt_klass (cfg, klass)) {
10232 MonoInst *offset_ins;
10234 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10235 dreg = alloc_ireg_mp (cfg);
10236 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10237 } else if (field->offset == 0) {
10240 int addr_reg = mono_alloc_preg (cfg);
10241 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10243 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10244 MonoInst *iargs [2];
10246 g_assert (field->parent);
10247 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10248 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10249 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10251 MonoVTable *vtable = NULL;
10253 if (!cfg->compile_aot)
10254 vtable = mono_class_vtable (cfg->domain, klass);
10255 CHECK_TYPELOAD (klass);
10258 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10259 if (!(g_slist_find (class_inits, klass))) {
10260 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10261 if (cfg->verbose_level > 2)
10262 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10263 class_inits = g_slist_prepend (class_inits, klass);
10266 if (cfg->run_cctors) {
10268 /* This makes it so that inlining cannot trigger */
10269 /* .cctors: too many apps depend on them */
10270 /* running with a specific order... */
10272 if (! vtable->initialized)
10273 INLINE_FAILURE ("class init");
10274 ex = mono_runtime_class_init_full (vtable, FALSE);
10276 set_exception_object (cfg, ex);
10277 goto exception_exit;
10281 if (cfg->compile_aot)
10282 EMIT_NEW_SFLDACONST (cfg, ins, field);
10285 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10287 EMIT_NEW_PCONST (cfg, ins, addr);
10290 MonoInst *iargs [1];
10291 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10292 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10296 /* Generate IR to do the actual load/store operation */
10298 if (op == CEE_LDSFLDA) {
10299 ins->klass = mono_class_from_mono_type (ftype);
10300 ins->type = STACK_PTR;
10302 } else if (op == CEE_STSFLD) {
10305 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10306 store->flags |= ins_flag;
10308 gboolean is_const = FALSE;
10309 MonoVTable *vtable = NULL;
10310 gpointer addr = NULL;
10312 if (!context_used) {
10313 vtable = mono_class_vtable (cfg->domain, klass);
10314 CHECK_TYPELOAD (klass);
10316 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10317 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10318 int ro_type = ftype->type;
10320 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10321 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10322 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10325 GSHAREDVT_FAILURE (op);
10327 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10330 case MONO_TYPE_BOOLEAN:
10332 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10336 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10339 case MONO_TYPE_CHAR:
10341 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10345 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10350 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10354 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10359 case MONO_TYPE_PTR:
10360 case MONO_TYPE_FNPTR:
10361 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10362 type_to_eval_stack_type ((cfg), field->type, *sp);
10365 case MONO_TYPE_STRING:
10366 case MONO_TYPE_OBJECT:
10367 case MONO_TYPE_CLASS:
10368 case MONO_TYPE_SZARRAY:
10369 case MONO_TYPE_ARRAY:
10370 if (!mono_gc_is_moving ()) {
10371 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10372 type_to_eval_stack_type ((cfg), field->type, *sp);
10380 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10385 case MONO_TYPE_VALUETYPE:
10395 CHECK_STACK_OVF (1);
10397 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10398 load->flags |= ins_flag;
10411 token = read32 (ip + 1);
10412 klass = mini_get_class (method, token, generic_context);
10413 CHECK_TYPELOAD (klass);
10414 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10415 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10416 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10417 generic_class_is_reference_type (cfg, klass)) {
10418 /* insert call to write barrier */
10419 emit_write_barrier (cfg, sp [0], sp [1]);
10431 const char *data_ptr;
10433 guint32 field_token;
10439 token = read32 (ip + 1);
10441 klass = mini_get_class (method, token, generic_context);
10442 CHECK_TYPELOAD (klass);
10444 context_used = mini_class_check_context_used (cfg, klass);
10446 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10447 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10448 ins->sreg1 = sp [0]->dreg;
10449 ins->type = STACK_I4;
10450 ins->dreg = alloc_ireg (cfg);
10451 MONO_ADD_INS (cfg->cbb, ins);
10452 *sp = mono_decompose_opcode (cfg, ins);
10455 if (context_used) {
10456 MonoInst *args [3];
10457 MonoClass *array_class = mono_array_class_get (klass, 1);
10458 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10460 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10463 args [0] = emit_get_rgctx_klass (cfg, context_used,
10464 array_class, MONO_RGCTX_INFO_VTABLE);
10469 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10471 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10473 if (cfg->opt & MONO_OPT_SHARED) {
10474 /* Decompose now to avoid problems with references to the domainvar */
10475 MonoInst *iargs [3];
10477 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10478 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10479 iargs [2] = sp [0];
10481 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10483 /* Decompose later since it is needed by abcrem */
10484 MonoClass *array_type = mono_array_class_get (klass, 1);
10485 mono_class_vtable (cfg->domain, array_type);
10486 CHECK_TYPELOAD (array_type);
10488 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10489 ins->dreg = alloc_ireg_ref (cfg);
10490 ins->sreg1 = sp [0]->dreg;
10491 ins->inst_newa_class = klass;
10492 ins->type = STACK_OBJ;
10493 ins->klass = array_type;
10494 MONO_ADD_INS (cfg->cbb, ins);
10495 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10496 cfg->cbb->has_array_access = TRUE;
10498 /* Needed so mono_emit_load_get_addr () gets called */
10499 mono_get_got_var (cfg);
10509 * we inline/optimize the initialization sequence if possible.
10510 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10511 * for small sizes open code the memcpy
10512 * ensure the rva field is big enough
10514 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10515 MonoMethod *memcpy_method = get_memcpy_method ();
10516 MonoInst *iargs [3];
10517 int add_reg = alloc_ireg_mp (cfg);
10519 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10520 if (cfg->compile_aot) {
10521 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10523 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10525 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10526 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10535 if (sp [0]->type != STACK_OBJ)
10538 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10539 ins->dreg = alloc_preg (cfg);
10540 ins->sreg1 = sp [0]->dreg;
10541 ins->type = STACK_I4;
10542 /* This flag will be inherited by the decomposition */
10543 ins->flags |= MONO_INST_FAULT;
10544 MONO_ADD_INS (cfg->cbb, ins);
10545 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10546 cfg->cbb->has_array_access = TRUE;
10554 if (sp [0]->type != STACK_OBJ)
10557 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10559 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10560 CHECK_TYPELOAD (klass);
10561 /* we need to make sure that this array is exactly the type it needs
10562 * to be for correctness. the wrappers are lax with their usage
10563 * so we need to ignore them here
10565 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10566 MonoClass *array_class = mono_array_class_get (klass, 1);
10567 mini_emit_check_array_type (cfg, sp [0], array_class);
10568 CHECK_TYPELOAD (array_class);
10572 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10577 case CEE_LDELEM_I1:
10578 case CEE_LDELEM_U1:
10579 case CEE_LDELEM_I2:
10580 case CEE_LDELEM_U2:
10581 case CEE_LDELEM_I4:
10582 case CEE_LDELEM_U4:
10583 case CEE_LDELEM_I8:
10585 case CEE_LDELEM_R4:
10586 case CEE_LDELEM_R8:
10587 case CEE_LDELEM_REF: {
10593 if (*ip == CEE_LDELEM) {
10595 token = read32 (ip + 1);
10596 klass = mini_get_class (method, token, generic_context);
10597 CHECK_TYPELOAD (klass);
10598 mono_class_init (klass);
10601 klass = array_access_to_klass (*ip);
10603 if (sp [0]->type != STACK_OBJ)
10606 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10608 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10609 // FIXME-VT: OP_ICONST optimization
10610 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10611 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10612 ins->opcode = OP_LOADV_MEMBASE;
10613 } else if (sp [1]->opcode == OP_ICONST) {
10614 int array_reg = sp [0]->dreg;
10615 int index_reg = sp [1]->dreg;
10616 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10618 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10619 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10621 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10622 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10625 if (*ip == CEE_LDELEM)
10632 case CEE_STELEM_I1:
10633 case CEE_STELEM_I2:
10634 case CEE_STELEM_I4:
10635 case CEE_STELEM_I8:
10636 case CEE_STELEM_R4:
10637 case CEE_STELEM_R8:
10638 case CEE_STELEM_REF:
10643 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10645 if (*ip == CEE_STELEM) {
10647 token = read32 (ip + 1);
10648 klass = mini_get_class (method, token, generic_context);
10649 CHECK_TYPELOAD (klass);
10650 mono_class_init (klass);
10653 klass = array_access_to_klass (*ip);
10655 if (sp [0]->type != STACK_OBJ)
10658 emit_array_store (cfg, klass, sp, TRUE);
10660 if (*ip == CEE_STELEM)
10667 case CEE_CKFINITE: {
10671 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10672 ins->sreg1 = sp [0]->dreg;
10673 ins->dreg = alloc_freg (cfg);
10674 ins->type = STACK_R8;
10675 MONO_ADD_INS (bblock, ins);
10677 *sp++ = mono_decompose_opcode (cfg, ins);
10682 case CEE_REFANYVAL: {
10683 MonoInst *src_var, *src;
10685 int klass_reg = alloc_preg (cfg);
10686 int dreg = alloc_preg (cfg);
10688 GSHAREDVT_FAILURE (*ip);
10691 MONO_INST_NEW (cfg, ins, *ip);
10694 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10695 CHECK_TYPELOAD (klass);
10696 mono_class_init (klass);
10698 context_used = mini_class_check_context_used (cfg, klass);
10701 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10703 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10704 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10705 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10707 if (context_used) {
10708 MonoInst *klass_ins;
10710 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10711 klass, MONO_RGCTX_INFO_KLASS);
10714 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10715 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10717 mini_emit_class_check (cfg, klass_reg, klass);
10719 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10720 ins->type = STACK_MP;
10725 case CEE_MKREFANY: {
10726 MonoInst *loc, *addr;
10728 GSHAREDVT_FAILURE (*ip);
10731 MONO_INST_NEW (cfg, ins, *ip);
10734 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10735 CHECK_TYPELOAD (klass);
10736 mono_class_init (klass);
10738 context_used = mini_class_check_context_used (cfg, klass);
10740 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10741 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10743 if (context_used) {
10744 MonoInst *const_ins;
10745 int type_reg = alloc_preg (cfg);
10747 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10748 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10749 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10751 } else if (cfg->compile_aot) {
10752 int const_reg = alloc_preg (cfg);
10753 int type_reg = alloc_preg (cfg);
10755 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10757 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10758 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10760 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10761 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10765 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10766 ins->type = STACK_VTYPE;
10767 ins->klass = mono_defaults.typed_reference_class;
10772 case CEE_LDTOKEN: {
10774 MonoClass *handle_class;
10776 CHECK_STACK_OVF (1);
10779 n = read32 (ip + 1);
10781 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10782 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10783 handle = mono_method_get_wrapper_data (method, n);
10784 handle_class = mono_method_get_wrapper_data (method, n + 1);
10785 if (handle_class == mono_defaults.typehandle_class)
10786 handle = &((MonoClass*)handle)->byval_arg;
10789 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10793 mono_class_init (handle_class);
10794 if (cfg->generic_sharing_context) {
10795 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10796 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10797 /* This case handles ldtoken
10798 of an open type, like for
10801 } else if (handle_class == mono_defaults.typehandle_class) {
10802 /* If we get a MONO_TYPE_CLASS
10803 then we need to provide the
10805 instantiation of it. */
10806 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10809 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10810 } else if (handle_class == mono_defaults.fieldhandle_class)
10811 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10812 else if (handle_class == mono_defaults.methodhandle_class)
10813 context_used = mini_method_check_context_used (cfg, handle);
10815 g_assert_not_reached ();
10818 if ((cfg->opt & MONO_OPT_SHARED) &&
10819 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10820 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10821 MonoInst *addr, *vtvar, *iargs [3];
10822 int method_context_used;
10824 method_context_used = mini_method_check_context_used (cfg, method);
10826 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10828 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10829 EMIT_NEW_ICONST (cfg, iargs [1], n);
10830 if (method_context_used) {
10831 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10832 method, MONO_RGCTX_INFO_METHOD);
10833 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10835 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10836 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10838 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10840 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10842 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10844 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10845 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10846 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10847 (cmethod->klass == mono_defaults.systemtype_class) &&
10848 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10849 MonoClass *tclass = mono_class_from_mono_type (handle);
10851 mono_class_init (tclass);
10852 if (context_used) {
10853 ins = emit_get_rgctx_klass (cfg, context_used,
10854 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10855 } else if (cfg->compile_aot) {
10856 if (method->wrapper_type) {
10857 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10858 /* Special case for static synchronized wrappers */
10859 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10861 /* FIXME: n is not a normal token */
10863 EMIT_NEW_PCONST (cfg, ins, NULL);
10866 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10869 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10871 ins->type = STACK_OBJ;
10872 ins->klass = cmethod->klass;
10875 MonoInst *addr, *vtvar;
10877 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10879 if (context_used) {
10880 if (handle_class == mono_defaults.typehandle_class) {
10881 ins = emit_get_rgctx_klass (cfg, context_used,
10882 mono_class_from_mono_type (handle),
10883 MONO_RGCTX_INFO_TYPE);
10884 } else if (handle_class == mono_defaults.methodhandle_class) {
10885 ins = emit_get_rgctx_method (cfg, context_used,
10886 handle, MONO_RGCTX_INFO_METHOD);
10887 } else if (handle_class == mono_defaults.fieldhandle_class) {
10888 ins = emit_get_rgctx_field (cfg, context_used,
10889 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10891 g_assert_not_reached ();
10893 } else if (cfg->compile_aot) {
10894 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10896 EMIT_NEW_PCONST (cfg, ins, handle);
10898 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10899 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10900 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10910 MONO_INST_NEW (cfg, ins, OP_THROW);
10912 ins->sreg1 = sp [0]->dreg;
10914 bblock->out_of_line = TRUE;
10915 MONO_ADD_INS (bblock, ins);
10916 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10917 MONO_ADD_INS (bblock, ins);
10920 link_bblock (cfg, bblock, end_bblock);
10921 start_new_bblock = 1;
10923 case CEE_ENDFINALLY:
10924 /* mono_save_seq_point_info () depends on this */
10925 if (sp != stack_start)
10926 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10927 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10928 MONO_ADD_INS (bblock, ins);
10930 start_new_bblock = 1;
10933 * Control will leave the method so empty the stack, otherwise
10934 * the next basic block will start with a nonempty stack.
10936 while (sp != stack_start) {
10941 case CEE_LEAVE_S: {
10944 if (*ip == CEE_LEAVE) {
10946 target = ip + 5 + (gint32)read32(ip + 1);
10949 target = ip + 2 + (signed char)(ip [1]);
10952 /* empty the stack */
10953 while (sp != stack_start) {
10958 * If this leave statement is in a catch block, check for a
10959 * pending exception, and rethrow it if necessary.
10960 * We avoid doing this in runtime invoke wrappers, since those are called
10961 * by native code which expects the wrapper to catch all exceptions.
10963 for (i = 0; i < header->num_clauses; ++i) {
10964 MonoExceptionClause *clause = &header->clauses [i];
10967 * Use <= in the final comparison to handle clauses with multiple
10968 * leave statements, like in bug #78024.
10969 * The ordering of the exception clauses guarantees that we find the
10970 * innermost clause.
10972 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10974 MonoBasicBlock *dont_throw;
10979 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10982 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10984 NEW_BBLOCK (cfg, dont_throw);
10987 * Currently, we always rethrow the abort exception, despite the
10988 * fact that this is not correct. See thread6.cs for an example.
10989 * But propagating the abort exception is more important than
10990 * getting the semantics right.
10992 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10993 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10994 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10996 MONO_START_BB (cfg, dont_throw);
11001 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11003 MonoExceptionClause *clause;
11005 for (tmp = handlers; tmp; tmp = tmp->next) {
11006 clause = tmp->data;
11007 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11009 link_bblock (cfg, bblock, tblock);
11010 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11011 ins->inst_target_bb = tblock;
11012 ins->inst_eh_block = clause;
11013 MONO_ADD_INS (bblock, ins);
11014 bblock->has_call_handler = 1;
11015 if (COMPILE_LLVM (cfg)) {
11016 MonoBasicBlock *target_bb;
11019 * Link the finally bblock with the target, since it will
11020 * conceptually branch there.
11021 * FIXME: Have to link the bblock containing the endfinally.
11023 GET_BBLOCK (cfg, target_bb, target);
11024 link_bblock (cfg, tblock, target_bb);
11027 g_list_free (handlers);
11030 MONO_INST_NEW (cfg, ins, OP_BR);
11031 MONO_ADD_INS (bblock, ins);
11032 GET_BBLOCK (cfg, tblock, target);
11033 link_bblock (cfg, bblock, tblock);
11034 ins->inst_target_bb = tblock;
11035 start_new_bblock = 1;
11037 if (*ip == CEE_LEAVE)
11046 * Mono specific opcodes
11048 case MONO_CUSTOM_PREFIX: {
11050 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11054 case CEE_MONO_ICALL: {
11056 MonoJitICallInfo *info;
11058 token = read32 (ip + 2);
11059 func = mono_method_get_wrapper_data (method, token);
11060 info = mono_find_jit_icall_by_addr (func);
11062 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11065 CHECK_STACK (info->sig->param_count);
11066 sp -= info->sig->param_count;
11068 ins = mono_emit_jit_icall (cfg, info->func, sp);
11069 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11073 inline_costs += 10 * num_calls++;
11077 case CEE_MONO_LDPTR: {
11080 CHECK_STACK_OVF (1);
11082 token = read32 (ip + 2);
11084 ptr = mono_method_get_wrapper_data (method, token);
11085 /* FIXME: Generalize this */
11086 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11087 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11092 EMIT_NEW_PCONST (cfg, ins, ptr);
11095 inline_costs += 10 * num_calls++;
11096 /* Can't embed random pointers into AOT code */
11100 case CEE_MONO_JIT_ICALL_ADDR: {
11101 MonoJitICallInfo *callinfo;
11104 CHECK_STACK_OVF (1);
11106 token = read32 (ip + 2);
11108 ptr = mono_method_get_wrapper_data (method, token);
11109 callinfo = mono_find_jit_icall_by_addr (ptr);
11110 g_assert (callinfo);
11111 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11114 inline_costs += 10 * num_calls++;
11117 case CEE_MONO_ICALL_ADDR: {
11118 MonoMethod *cmethod;
11121 CHECK_STACK_OVF (1);
11123 token = read32 (ip + 2);
11125 cmethod = mono_method_get_wrapper_data (method, token);
11127 if (cfg->compile_aot) {
11128 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11130 ptr = mono_lookup_internal_call (cmethod);
11132 EMIT_NEW_PCONST (cfg, ins, ptr);
11138 case CEE_MONO_VTADDR: {
11139 MonoInst *src_var, *src;
11145 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11146 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11151 case CEE_MONO_NEWOBJ: {
11152 MonoInst *iargs [2];
11154 CHECK_STACK_OVF (1);
11156 token = read32 (ip + 2);
11157 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11158 mono_class_init (klass);
11159 NEW_DOMAINCONST (cfg, iargs [0]);
11160 MONO_ADD_INS (cfg->cbb, iargs [0]);
11161 NEW_CLASSCONST (cfg, iargs [1], klass);
11162 MONO_ADD_INS (cfg->cbb, iargs [1]);
11163 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11165 inline_costs += 10 * num_calls++;
11168 case CEE_MONO_OBJADDR:
11171 MONO_INST_NEW (cfg, ins, OP_MOVE);
11172 ins->dreg = alloc_ireg_mp (cfg);
11173 ins->sreg1 = sp [0]->dreg;
11174 ins->type = STACK_MP;
11175 MONO_ADD_INS (cfg->cbb, ins);
11179 case CEE_MONO_LDNATIVEOBJ:
11181 * Similar to LDOBJ, but instead load the unmanaged
11182 * representation of the vtype to the stack.
11187 token = read32 (ip + 2);
11188 klass = mono_method_get_wrapper_data (method, token);
11189 g_assert (klass->valuetype);
11190 mono_class_init (klass);
11193 MonoInst *src, *dest, *temp;
11196 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11197 temp->backend.is_pinvoke = 1;
11198 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11199 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11201 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11202 dest->type = STACK_VTYPE;
11203 dest->klass = klass;
11209 case CEE_MONO_RETOBJ: {
11211 * Same as RET, but return the native representation of a vtype
11214 g_assert (cfg->ret);
11215 g_assert (mono_method_signature (method)->pinvoke);
11220 token = read32 (ip + 2);
11221 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11223 if (!cfg->vret_addr) {
11224 g_assert (cfg->ret_var_is_local);
11226 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11228 EMIT_NEW_RETLOADA (cfg, ins);
11230 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11232 if (sp != stack_start)
11235 MONO_INST_NEW (cfg, ins, OP_BR);
11236 ins->inst_target_bb = end_bblock;
11237 MONO_ADD_INS (bblock, ins);
11238 link_bblock (cfg, bblock, end_bblock);
11239 start_new_bblock = 1;
11243 case CEE_MONO_CISINST:
11244 case CEE_MONO_CCASTCLASS: {
11249 token = read32 (ip + 2);
11250 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11251 if (ip [1] == CEE_MONO_CISINST)
11252 ins = handle_cisinst (cfg, klass, sp [0]);
11254 ins = handle_ccastclass (cfg, klass, sp [0]);
11260 case CEE_MONO_SAVE_LMF:
11261 case CEE_MONO_RESTORE_LMF:
11262 #ifdef MONO_ARCH_HAVE_LMF_OPS
11263 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11264 MONO_ADD_INS (bblock, ins);
11265 cfg->need_lmf_area = TRUE;
11269 case CEE_MONO_CLASSCONST:
11270 CHECK_STACK_OVF (1);
11272 token = read32 (ip + 2);
11273 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11276 inline_costs += 10 * num_calls++;
11278 case CEE_MONO_NOT_TAKEN:
11279 bblock->out_of_line = TRUE;
11282 case CEE_MONO_TLS: {
11285 CHECK_STACK_OVF (1);
11287 key = (gint32)read32 (ip + 2);
11288 g_assert (key < TLS_KEY_NUM);
11290 ins = mono_create_tls_get (cfg, key);
11292 if (cfg->compile_aot) {
11294 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11295 ins->dreg = alloc_preg (cfg);
11296 ins->type = STACK_PTR;
11298 g_assert_not_reached ();
11301 ins->type = STACK_PTR;
11302 MONO_ADD_INS (bblock, ins);
11307 case CEE_MONO_DYN_CALL: {
11308 MonoCallInst *call;
11310 /* It would be easier to call a trampoline, but that would put an
11311 * extra frame on the stack, confusing exception handling. So
11312 * implement it inline using an opcode for now.
11315 if (!cfg->dyn_call_var) {
11316 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11317 /* prevent it from being register allocated */
11318 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11321 /* Has to use a call inst since it local regalloc expects it */
11322 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11323 ins = (MonoInst*)call;
11325 ins->sreg1 = sp [0]->dreg;
11326 ins->sreg2 = sp [1]->dreg;
11327 MONO_ADD_INS (bblock, ins);
11329 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11332 inline_costs += 10 * num_calls++;
11336 case CEE_MONO_MEMORY_BARRIER: {
11338 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11342 case CEE_MONO_JIT_ATTACH: {
11343 MonoInst *args [16];
11344 MonoInst *ad_ins, *lmf_ins;
11345 MonoBasicBlock *next_bb = NULL;
11347 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11349 EMIT_NEW_PCONST (cfg, ins, NULL);
11350 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11356 ad_ins = mono_get_domain_intrinsic (cfg);
11357 lmf_ins = mono_get_lmf_intrinsic (cfg);
11360 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11361 NEW_BBLOCK (cfg, next_bb);
11363 MONO_ADD_INS (cfg->cbb, ad_ins);
11364 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11365 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11367 MONO_ADD_INS (cfg->cbb, lmf_ins);
11368 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11369 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11372 if (cfg->compile_aot) {
11373 /* AOT code is only used in the root domain */
11374 EMIT_NEW_PCONST (cfg, args [0], NULL);
11376 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11378 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11379 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11382 MONO_START_BB (cfg, next_bb);
11388 case CEE_MONO_JIT_DETACH: {
11389 MonoInst *args [16];
11391 /* Restore the original domain */
11392 dreg = alloc_ireg (cfg);
11393 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11394 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11399 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11405 case CEE_PREFIX1: {
11408 case CEE_ARGLIST: {
11409 /* somewhat similar to LDTOKEN */
11410 MonoInst *addr, *vtvar;
11411 CHECK_STACK_OVF (1);
11412 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11414 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11415 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11417 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11418 ins->type = STACK_VTYPE;
11419 ins->klass = mono_defaults.argumenthandle_class;
11432 * The following transforms:
11433 * CEE_CEQ into OP_CEQ
11434 * CEE_CGT into OP_CGT
11435 * CEE_CGT_UN into OP_CGT_UN
11436 * CEE_CLT into OP_CLT
11437 * CEE_CLT_UN into OP_CLT_UN
11439 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11441 MONO_INST_NEW (cfg, ins, cmp->opcode);
11443 cmp->sreg1 = sp [0]->dreg;
11444 cmp->sreg2 = sp [1]->dreg;
11445 type_from_op (cmp, sp [0], sp [1]);
11447 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11448 cmp->opcode = OP_LCOMPARE;
11449 else if (sp [0]->type == STACK_R8)
11450 cmp->opcode = OP_FCOMPARE;
11452 cmp->opcode = OP_ICOMPARE;
11453 MONO_ADD_INS (bblock, cmp);
11454 ins->type = STACK_I4;
11455 ins->dreg = alloc_dreg (cfg, ins->type);
11456 type_from_op (ins, sp [0], sp [1]);
11458 if (cmp->opcode == OP_FCOMPARE) {
11460 * The backends expect the fceq opcodes to do the
11463 cmp->opcode = OP_NOP;
11464 ins->sreg1 = cmp->sreg1;
11465 ins->sreg2 = cmp->sreg2;
11467 MONO_ADD_INS (bblock, ins);
11473 MonoInst *argconst;
11474 MonoMethod *cil_method;
11476 CHECK_STACK_OVF (1);
11478 n = read32 (ip + 2);
11479 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11480 if (!cmethod || mono_loader_get_last_error ())
11482 mono_class_init (cmethod->klass);
11484 mono_save_token_info (cfg, image, n, cmethod);
11486 context_used = mini_method_check_context_used (cfg, cmethod);
11488 cil_method = cmethod;
11489 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11490 METHOD_ACCESS_FAILURE;
11492 if (mono_security_cas_enabled ()) {
11493 if (check_linkdemand (cfg, method, cmethod))
11494 INLINE_FAILURE ("linkdemand");
11495 CHECK_CFG_EXCEPTION;
11496 } else if (mono_security_core_clr_enabled ()) {
11497 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11501 * Optimize the common case of ldftn+delegate creation
11503 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11504 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11505 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11506 MonoInst *target_ins;
11507 MonoMethod *invoke;
11508 int invoke_context_used;
11510 invoke = mono_get_delegate_invoke (ctor_method->klass);
11511 if (!invoke || !mono_method_signature (invoke))
11514 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11516 target_ins = sp [-1];
11518 if (mono_security_core_clr_enabled ())
11519 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11521 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11522 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11523 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11524 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11525 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11529 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11530 /* FIXME: SGEN support */
11531 if (invoke_context_used == 0) {
11533 if (cfg->verbose_level > 3)
11534 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11536 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11537 CHECK_CFG_EXCEPTION;
11546 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11547 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11551 inline_costs += 10 * num_calls++;
11554 case CEE_LDVIRTFTN: {
11555 MonoInst *args [2];
11559 n = read32 (ip + 2);
11560 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11561 if (!cmethod || mono_loader_get_last_error ())
11563 mono_class_init (cmethod->klass);
11565 context_used = mini_method_check_context_used (cfg, cmethod);
11567 if (mono_security_cas_enabled ()) {
11568 if (check_linkdemand (cfg, method, cmethod))
11569 INLINE_FAILURE ("linkdemand");
11570 CHECK_CFG_EXCEPTION;
11571 } else if (mono_security_core_clr_enabled ()) {
11572 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11578 args [1] = emit_get_rgctx_method (cfg, context_used,
11579 cmethod, MONO_RGCTX_INFO_METHOD);
11582 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11584 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11587 inline_costs += 10 * num_calls++;
11591 CHECK_STACK_OVF (1);
11593 n = read16 (ip + 2);
11595 EMIT_NEW_ARGLOAD (cfg, ins, n);
11600 CHECK_STACK_OVF (1);
11602 n = read16 (ip + 2);
11604 NEW_ARGLOADA (cfg, ins, n);
11605 MONO_ADD_INS (cfg->cbb, ins);
11613 n = read16 (ip + 2);
11615 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11617 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11621 CHECK_STACK_OVF (1);
11623 n = read16 (ip + 2);
11625 EMIT_NEW_LOCLOAD (cfg, ins, n);
11630 unsigned char *tmp_ip;
11631 CHECK_STACK_OVF (1);
11633 n = read16 (ip + 2);
11636 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11642 EMIT_NEW_LOCLOADA (cfg, ins, n);
11651 n = read16 (ip + 2);
11653 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11655 emit_stloc_ir (cfg, sp, header, n);
11662 if (sp != stack_start)
11664 if (cfg->method != method)
11666 * Inlining this into a loop in a parent could lead to
11667 * stack overflows which is different behavior than the
11668 * non-inlined case, thus disable inlining in this case.
11670 goto inline_failure;
11672 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11673 ins->dreg = alloc_preg (cfg);
11674 ins->sreg1 = sp [0]->dreg;
11675 ins->type = STACK_PTR;
11676 MONO_ADD_INS (cfg->cbb, ins);
11678 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11680 ins->flags |= MONO_INST_INIT;
11685 case CEE_ENDFILTER: {
11686 MonoExceptionClause *clause, *nearest;
11687 int cc, nearest_num;
11691 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11693 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11694 ins->sreg1 = (*sp)->dreg;
11695 MONO_ADD_INS (bblock, ins);
11696 start_new_bblock = 1;
11701 for (cc = 0; cc < header->num_clauses; ++cc) {
11702 clause = &header->clauses [cc];
11703 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11704 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11705 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11710 g_assert (nearest);
11711 if ((ip - header->code) != nearest->handler_offset)
11716 case CEE_UNALIGNED_:
11717 ins_flag |= MONO_INST_UNALIGNED;
11718 /* FIXME: record alignment? we can assume 1 for now */
11722 case CEE_VOLATILE_:
11723 ins_flag |= MONO_INST_VOLATILE;
11727 ins_flag |= MONO_INST_TAILCALL;
11728 cfg->flags |= MONO_CFG_HAS_TAIL;
11729 /* Can't inline tail calls at this time */
11730 inline_costs += 100000;
11737 token = read32 (ip + 2);
11738 klass = mini_get_class (method, token, generic_context);
11739 CHECK_TYPELOAD (klass);
11740 if (generic_class_is_reference_type (cfg, klass))
11741 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11743 mini_emit_initobj (cfg, *sp, NULL, klass);
11747 case CEE_CONSTRAINED_:
11749 token = read32 (ip + 2);
11750 constrained_call = mini_get_class (method, token, generic_context);
11751 CHECK_TYPELOAD (constrained_call);
11755 case CEE_INITBLK: {
11756 MonoInst *iargs [3];
11760 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11761 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11762 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11763 /* emit_memset only works when val == 0 */
11764 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11766 iargs [0] = sp [0];
11767 iargs [1] = sp [1];
11768 iargs [2] = sp [2];
11769 if (ip [1] == CEE_CPBLK) {
11770 MonoMethod *memcpy_method = get_memcpy_method ();
11771 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11773 MonoMethod *memset_method = get_memset_method ();
11774 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11784 ins_flag |= MONO_INST_NOTYPECHECK;
11786 ins_flag |= MONO_INST_NORANGECHECK;
11787 /* we ignore the no-nullcheck for now since we
11788 * really do it explicitly only when doing callvirt->call
11792 case CEE_RETHROW: {
11794 int handler_offset = -1;
11796 for (i = 0; i < header->num_clauses; ++i) {
11797 MonoExceptionClause *clause = &header->clauses [i];
11798 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11799 handler_offset = clause->handler_offset;
11804 bblock->flags |= BB_EXCEPTION_UNSAFE;
11806 g_assert (handler_offset != -1);
11808 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11809 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11810 ins->sreg1 = load->dreg;
11811 MONO_ADD_INS (bblock, ins);
11813 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11814 MONO_ADD_INS (bblock, ins);
11817 link_bblock (cfg, bblock, end_bblock);
11818 start_new_bblock = 1;
11826 GSHAREDVT_FAILURE (*ip);
11828 CHECK_STACK_OVF (1);
11830 token = read32 (ip + 2);
11831 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11832 MonoType *type = mono_type_create_from_typespec (image, token);
11833 val = mono_type_size (type, &ialign);
11835 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11836 CHECK_TYPELOAD (klass);
11837 mono_class_init (klass);
11838 val = mono_type_size (&klass->byval_arg, &ialign);
11840 EMIT_NEW_ICONST (cfg, ins, val);
11845 case CEE_REFANYTYPE: {
11846 MonoInst *src_var, *src;
11848 GSHAREDVT_FAILURE (*ip);
11854 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11856 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11857 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11858 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11863 case CEE_READONLY_:
11876 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11886 g_warning ("opcode 0x%02x not handled", *ip);
11890 if (start_new_bblock != 1)
11893 bblock->cil_length = ip - bblock->cil_code;
11894 if (bblock->next_bb) {
11895 /* This could already be set because of inlining, #693905 */
11896 MonoBasicBlock *bb = bblock;
11898 while (bb->next_bb)
11900 bb->next_bb = end_bblock;
11902 bblock->next_bb = end_bblock;
11905 if (cfg->method == method && cfg->domainvar) {
11907 MonoInst *get_domain;
11909 cfg->cbb = init_localsbb;
11911 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
11912 MONO_ADD_INS (cfg->cbb, get_domain);
11914 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11916 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11917 MONO_ADD_INS (cfg->cbb, store);
11920 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11921 if (cfg->compile_aot)
11922 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11923 mono_get_got_var (cfg);
11926 if (cfg->method == method && cfg->got_var)
11927 mono_emit_load_got_addr (cfg);
11930 cfg->cbb = init_localsbb;
11932 for (i = 0; i < header->num_locals; ++i) {
11933 emit_init_local (cfg, i, header->locals [i]);
11937 if (cfg->init_ref_vars && cfg->method == method) {
11938 /* Emit initialization for ref vars */
11939 // FIXME: Avoid duplication initialization for IL locals.
11940 for (i = 0; i < cfg->num_varinfo; ++i) {
11941 MonoInst *ins = cfg->varinfo [i];
11943 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11944 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11948 if (cfg->lmf_var && cfg->method == method) {
11949 cfg->cbb = init_localsbb;
11950 emit_push_lmf (cfg);
11954 MonoBasicBlock *bb;
11957 * Make seq points at backward branch targets interruptable.
11959 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11960 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11961 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11964 /* Add a sequence point for method entry/exit events */
11966 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11967 MONO_ADD_INS (init_localsbb, ins);
11968 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11969 MONO_ADD_INS (cfg->bb_exit, ins);
11973 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
11974 * the code they refer to was dead (#11880).
11976 if (sym_seq_points) {
11977 for (i = 0; i < header->code_size; ++i) {
11978 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
11981 NEW_SEQ_POINT (cfg, ins, i, FALSE);
11982 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
11989 if (cfg->method == method) {
11990 MonoBasicBlock *bb;
11991 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11992 bb->region = mono_find_block_region (cfg, bb->real_offset);
11994 mono_create_spvar_for_region (cfg, bb->region);
11995 if (cfg->verbose_level > 2)
11996 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12000 g_slist_free (class_inits);
12001 dont_inline = g_list_remove (dont_inline, method);
12003 if (inline_costs < 0) {
12006 /* Method is too large */
12007 mname = mono_method_full_name (method, TRUE);
12008 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12009 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12011 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12012 mono_basic_block_free (original_bb);
12016 if ((cfg->verbose_level > 2) && (cfg->method == method))
12017 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12019 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12020 mono_basic_block_free (original_bb);
12021 return inline_costs;
12024 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12031 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12035 set_exception_type_from_invalid_il (cfg, method, ip);
12039 g_slist_free (class_inits);
12040 mono_basic_block_free (original_bb);
12041 dont_inline = g_list_remove (dont_inline, method);
12042 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *   Map a register-source STORE*_MEMBASE opcode to its immediate-source
 *   counterpart (OP_*_MEMBASE_REG -> OP_*_MEMBASE_IMM), used when the
 *   stored value is known to be a constant.
 * NOTE(review): this listing has lines elided (the return-type line, the
 * switch/braces and the default label are not visible); comments describe
 * only the visible case pairs — confirm against the full source.
 */
12047 store_membase_reg_to_store_membase_imm (int opcode)
12050 case OP_STORE_MEMBASE_REG:
12051 return OP_STORE_MEMBASE_IMM;
12052 case OP_STOREI1_MEMBASE_REG:
12053 return OP_STOREI1_MEMBASE_IMM;
12054 case OP_STOREI2_MEMBASE_REG:
12055 return OP_STOREI2_MEMBASE_IMM;
12056 case OP_STOREI4_MEMBASE_REG:
12057 return OP_STOREI4_MEMBASE_IMM;
12058 case OP_STOREI8_MEMBASE_REG:
12059 return OP_STOREI8_MEMBASE_IMM;
/* Any opcode without an immediate form is a caller bug. */
12061 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *   Map a three-register ALU/compare/store opcode to its immediate-operand
 *   form (e.g. the int add opcode -> OP_IADD_IMM), enabling constant
 *   folding into the instruction itself.
 * NOTE(review): the case labels are elided in this listing — only the
 * return statements are visible.  The int ('I') group, long ('L') group,
 * compares, membase stores, and x86/amd64-specific opcodes are grouped
 * below; confirm the exact case pairings against the full source.
 */
12068 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU forms */
12072 return OP_IADD_IMM;
12074 return OP_ISUB_IMM;
12076 return OP_IDIV_IMM;
12078 return OP_IDIV_UN_IMM;
12080 return OP_IREM_IMM;
12082 return OP_IREM_UN_IMM;
12084 return OP_IMUL_IMM;
12086 return OP_IAND_IMM;
12090 return OP_IXOR_IMM;
12092 return OP_ISHL_IMM;
12094 return OP_ISHR_IMM;
12096 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU forms */
12099 return OP_LADD_IMM;
12101 return OP_LSUB_IMM;
12103 return OP_LAND_IMM;
12107 return OP_LXOR_IMM;
12109 return OP_LSHL_IMM;
12111 return OP_LSHR_IMM;
12113 return OP_LSHR_UN_IMM;
/* compares */
12116 return OP_COMPARE_IMM;
12118 return OP_ICOMPARE_IMM;
12120 return OP_LCOMPARE_IMM;
/* membase stores with a constant value */
12122 case OP_STORE_MEMBASE_REG:
12123 return OP_STORE_MEMBASE_IMM;
12124 case OP_STOREI1_MEMBASE_REG:
12125 return OP_STOREI1_MEMBASE_IMM;
12126 case OP_STOREI2_MEMBASE_REG:
12127 return OP_STOREI2_MEMBASE_IMM;
12128 case OP_STOREI4_MEMBASE_REG:
12129 return OP_STOREI4_MEMBASE_IMM;
/* architecture-specific immediate forms */
12131 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12133 return OP_X86_PUSH_IMM;
12134 case OP_X86_COMPARE_MEMBASE_REG:
12135 return OP_X86_COMPARE_MEMBASE_IMM;
12137 #if defined(TARGET_AMD64)
12138 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12139 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* indirect call through a known address becomes a direct call */
12141 case OP_VOIDCALL_REG:
12142 return OP_VOIDCALL;
12150 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *   Map a CIL LDIND_* (load indirect) opcode to the corresponding
 *   LOAD*_MEMBASE IR opcode.
 * NOTE(review): most case labels are elided in this listing; the return
 * order below matches the usual I1/U1/I2/U2/I4/U4/I(native)/REF/I8/R4/R8
 * sequence, but confirm against the full source.
 */
12157 ldind_to_load_membase (int opcode)
12161 return OP_LOADI1_MEMBASE;
12163 return OP_LOADU1_MEMBASE;
12165 return OP_LOADI2_MEMBASE;
12167 return OP_LOADU2_MEMBASE;
12169 return OP_LOADI4_MEMBASE;
12171 return OP_LOADU4_MEMBASE;
12173 return OP_LOAD_MEMBASE;
/* object references load with the native-word opcode as well */
12174 case CEE_LDIND_REF:
12175 return OP_LOAD_MEMBASE;
12177 return OP_LOADI8_MEMBASE;
12179 return OP_LOADR4_MEMBASE;
12181 return OP_LOADR8_MEMBASE;
/* non-LDIND opcode: caller bug */
12183 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *   Map a CIL STIND_* (store indirect) opcode to the corresponding
 *   STORE*_MEMBASE_REG IR opcode.
 * NOTE(review): most case labels are elided in this listing; the visible
 * returns follow the I1/I2/I4/REF/I8/R4/R8 sequence — confirm against the
 * full source.
 */
12190 stind_to_store_membase (int opcode)
12194 return OP_STOREI1_MEMBASE_REG;
12196 return OP_STOREI2_MEMBASE_REG;
12198 return OP_STOREI4_MEMBASE_REG;
/* reference stores use the native-word store opcode */
12200 case CEE_STIND_REF:
12201 return OP_STORE_MEMBASE_REG;
12203 return OP_STOREI8_MEMBASE_REG;
12205 return OP_STORER4_MEMBASE_REG;
12207 return OP_STORER8_MEMBASE_REG;
/* non-STIND opcode: caller bug */
12209 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *   Map a LOAD*_MEMBASE (base register + offset) opcode to the absolute
 *   address LOAD*_MEM form.  Only x86/amd64 support these opcodes, hence
 *   the TARGET guard; the 8-byte form additionally needs 64-bit registers.
 * NOTE(review): the fallthrough/default path and closing #endif are elided
 * in this listing — presumably the function returns -1 (or similar) for
 * unsupported opcodes/targets; confirm against the full source.
 */
12216 mono_load_membase_to_load_mem (int opcode)
12218 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12219 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12221 case OP_LOAD_MEMBASE:
12222 return OP_LOAD_MEM;
12223 case OP_LOADU1_MEMBASE:
12224 return OP_LOADU1_MEM;
12225 case OP_LOADU2_MEMBASE:
12226 return OP_LOADU2_MEM;
12227 case OP_LOADI4_MEMBASE:
12228 return OP_LOADI4_MEM;
12229 case OP_LOADU4_MEMBASE:
12230 return OP_LOADU4_MEM;
12231 #if SIZEOF_REGISTER == 8
12232 case OP_LOADI8_MEMBASE:
12233 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *   Given a store opcode and an ALU opcode, return the x86/amd64
 *   read-modify-write form that operates directly on the memory
 *   destination (e.g. add reg -> [base+disp]), or fail for unsupported
 *   combinations.  Used to fuse a store with the ALU op that feeds it.
 * NOTE(review): case labels and the failure return are elided in this
 * listing; the grouping below (REG forms, then IMM forms, per target) is
 * inferred from the visible returns — confirm against the full source.
 */
12242 op_to_op_dest_membase (int store_opcode, int opcode)
12244 #if defined(TARGET_X86)
/* x86: only word-sized / 4-byte stores can be fused */
12245 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12250 return OP_X86_ADD_MEMBASE_REG;
12252 return OP_X86_SUB_MEMBASE_REG;
12254 return OP_X86_AND_MEMBASE_REG;
12256 return OP_X86_OR_MEMBASE_REG;
12258 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source variants */
12261 return OP_X86_ADD_MEMBASE_IMM;
12264 return OP_X86_SUB_MEMBASE_IMM;
12267 return OP_X86_AND_MEMBASE_IMM;
12270 return OP_X86_OR_MEMBASE_IMM;
12273 return OP_X86_XOR_MEMBASE_IMM;
12279 #if defined(TARGET_AMD64)
/* amd64: 4- and 8-byte stores can be fused */
12280 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ALU forms reuse the X86_* opcodes */
12285 return OP_X86_ADD_MEMBASE_REG;
12287 return OP_X86_SUB_MEMBASE_REG;
12289 return OP_X86_AND_MEMBASE_REG;
12291 return OP_X86_OR_MEMBASE_REG;
12293 return OP_X86_XOR_MEMBASE_REG;
12295 return OP_X86_ADD_MEMBASE_IMM;
12297 return OP_X86_SUB_MEMBASE_IMM;
12299 return OP_X86_AND_MEMBASE_IMM;
12301 return OP_X86_OR_MEMBASE_IMM;
12303 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit ALU forms use the AMD64_* opcodes */
12305 return OP_AMD64_ADD_MEMBASE_REG;
12307 return OP_AMD64_SUB_MEMBASE_REG;
12309 return OP_AMD64_AND_MEMBASE_REG;
12311 return OP_AMD64_OR_MEMBASE_REG;
12313 return OP_AMD64_XOR_MEMBASE_REG;
12316 return OP_AMD64_ADD_MEMBASE_IMM;
12319 return OP_AMD64_SUB_MEMBASE_IMM;
12322 return OP_AMD64_AND_MEMBASE_IMM;
12325 return OP_AMD64_OR_MEMBASE_IMM;
12328 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *   Fuse a setcc-style result with the byte store that consumes it:
 *   on x86/amd64, an EQ/NE condition whose result is stored through
 *   OP_STOREI1_MEMBASE_REG becomes a single SETcc-to-memory opcode.
 * NOTE(review): the case labels (presumably the CEQ/compare-result
 * opcodes) and the failure return are elided in this listing — confirm
 * against the full source.
 */
12338 op_to_op_store_membase (int store_opcode, int opcode)
12340 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12343 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12344 return OP_X86_SETEQ_MEMBASE;
12346 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12347 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *   Given the load opcode that produced sreg1 and the consuming opcode,
 *   return an x86/amd64 form that reads the first operand directly from
 *   memory (push/compare with a membase operand), avoiding the separate
 *   load.  Fails for load widths that would need sign/zero extension.
 * NOTE(review): per the in-source FIXMEs, sub-word loads have sign
 * extension issues, so only the LOADU1 compare-imm special case and
 * word-sized loads are fused.  Case labels, braces and the failure return
 * are elided in this listing — confirm against the full source.
 */
12355 op_to_op_src1_membase (int load_opcode, int opcode)
12358 /* FIXME: This has sign extension issues */
/* byte compare against an immediate can use the 8-bit memory compare */
12360 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12361 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: otherwise only fuse full-word (4-byte) loads */
12364 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12369 return OP_X86_PUSH_MEMBASE;
12370 case OP_COMPARE_IMM:
12371 case OP_ICOMPARE_IMM:
12372 return OP_X86_COMPARE_MEMBASE_IMM;
12375 return OP_X86_COMPARE_MEMBASE_REG;
12379 #ifdef TARGET_AMD64
12380 /* FIXME: This has sign extension issues */
12382 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12383 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32) OP_LOAD_MEMBASE is 4 bytes, OP_LOADI8_MEMBASE 8 */
12388 #ifdef __mono_ilp32__
12389 if (load_opcode == OP_LOADI8_MEMBASE)
12391 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12393 return OP_X86_PUSH_MEMBASE;
/* the block below is commented out in the original source */
12395 /* FIXME: This only works for 32 bit immediates
12396 case OP_COMPARE_IMM:
12397 case OP_LCOMPARE_IMM:
12398 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12399 return OP_AMD64_COMPARE_MEMBASE_IMM;
12401 case OP_ICOMPARE_IMM:
12402 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12403 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12407 #ifdef __mono_ilp32__
12408 if (load_opcode == OP_LOAD_MEMBASE)
12409 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12410 if (load_opcode == OP_LOADI8_MEMBASE)
12412 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12414 return OP_AMD64_COMPARE_MEMBASE_REG;
12417 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12418 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *   Same idea as op_to_op_src1_membase but for the SECOND source operand:
 *   fuse the load feeding sreg2 into an x86/amd64 reg,membase ALU/compare
 *   form.  Only word-sized loads are eligible (no implicit extension).
 * NOTE(review): case labels and the failure return are elided in this
 * listing; the ILP32 special-casing distinguishes 4-byte OP_LOAD_MEMBASE
 * from 8-byte OP_LOADI8_MEMBASE — confirm against the full source.
 */
12427 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only 4-byte/word loads can be fused */
12430 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12436 return OP_X86_COMPARE_REG_MEMBASE;
12438 return OP_X86_ADD_REG_MEMBASE;
12440 return OP_X86_SUB_REG_MEMBASE;
12442 return OP_X86_AND_REG_MEMBASE;
12444 return OP_X86_OR_REG_MEMBASE;
12446 return OP_X86_XOR_REG_MEMBASE;
12450 #ifdef TARGET_AMD64
12451 #ifdef __mono_ilp32__
12452 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12454 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32-bit operand forms */
12458 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12460 return OP_X86_ADD_REG_MEMBASE;
12462 return OP_X86_SUB_REG_MEMBASE;
12464 return OP_X86_AND_REG_MEMBASE;
12466 return OP_X86_OR_REG_MEMBASE;
12468 return OP_X86_XOR_REG_MEMBASE;
12470 #ifdef __mono_ilp32__
12471 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12473 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64-bit operand forms */
12478 return OP_AMD64_COMPARE_REG_MEMBASE;
12480 return OP_AMD64_ADD_REG_MEMBASE;
12482 return OP_AMD64_SUB_REG_MEMBASE;
12484 return OP_AMD64_AND_REG_MEMBASE;
12486 return OP_AMD64_OR_REG_MEMBASE;
12488 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *   Like mono_op_to_op_imm, but refuse to convert opcodes that this
 *   architecture emulates in software (long shifts on 32-bit targets
 *   without native support, emulated mul/div/rem) — the elided case
 *   labels under each #if presumably fall through to a "no conversion"
 *   result.  Everything else delegates to mono_op_to_op_imm.
 * NOTE(review): the case labels between the #if guards are elided in this
 * listing — confirm against the full source.
 */
12497 mono_op_to_op_imm_noemul (int opcode)
12500 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12506 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12513 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12518 return mono_op_to_op_imm (opcode);
/*
 * NOTE(review): this listing is elided (interior lines are missing
 * throughout); comments below annotate only the visible code.
 */
12523 * mono_handle_global_vregs:
12525 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12529 mono_handle_global_vregs (MonoCompile *cfg)
12531 gint32 *vreg_to_bb;
12532 MonoBasicBlock *bb;
/*
 * NOTE(review): sizeof (gint32*) sizes the array by POINTER size, but the
 * elements are gint32 — over-allocates on 64-bit, would under-allocate if
 * pointers were ever smaller than gint32.  Looks like it should be
 * sizeof (gint32); confirm and fix in the full source.
 */
12535 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12537 #ifdef MONO_ARCH_SIMD_INTRINSICS
12538 if (cfg->uses_simd_intrinsics)
12539 mono_simd_simplify_indirection (cfg);
12542 /* Find local vregs used in more than one bb */
12543 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12544 MonoInst *ins = bb->code;
12545 int block_num = bb->block_num;
12547 if (cfg->verbose_level > 2)
12548 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12551 for (; ins; ins = ins->next) {
12552 const char *spec = INS_INFO (ins->opcode);
12553 int regtype = 0, regindex;
12556 if (G_UNLIKELY (cfg->verbose_level > 2))
12557 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
12559 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg (0) then sreg1..sreg3 (1..3) of the instruction */
12561 for (regindex = 0; regindex < 4; regindex ++) {
12564 if (regindex == 0) {
12565 regtype = spec [MONO_INST_DEST];
12566 if (regtype == ' ')
12569 } else if (regindex == 1) {
12570 regtype = spec [MONO_INST_SRC1];
12571 if (regtype == ' ')
12574 } else if (regindex == 2) {
12575 regtype = spec [MONO_INST_SRC2];
12576 if (regtype == ' ')
12579 } else if (regindex == 3) {
12580 regtype = spec [MONO_INST_SRC3];
12581 if (regtype == ' ')
12586 #if SIZEOF_REGISTER == 4
12587 /* In the LLVM case, the long opcodes are not decomposed */
12588 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12590 * Since some instructions reference the original long vreg,
12591 * and some reference the two component vregs, it is quite hard
12592 * to determine when it needs to be global. So be conservative.
12594 if (!get_vreg_to_inst (cfg, vreg)) {
12595 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12597 if (cfg->verbose_level > 2)
12598 printf ("LONG VREG R%d made global.\n", vreg);
12602 * Make the component vregs volatile since the optimizations can
12603 * get confused otherwise.
/* vreg+1 / vreg+2 are the low/high 32-bit halves of a long vreg */
12605 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12606 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12610 g_assert (vreg != -1);
12612 prev_bb = vreg_to_bb [vreg];
12613 if (prev_bb == 0) {
/* first sighting: record owning bblock (+1 so block 0 isn't "unseen") */
12614 /* 0 is a valid block num */
12615 vreg_to_bb [vreg] = block_num + 1;
12616 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers never need a variable */
12617 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12620 if (!get_vreg_to_inst (cfg, vreg)) {
12621 if (G_UNLIKELY (cfg->verbose_level > 2))
12622 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick a variable type matching the regtype (elided switch) */
12626 if (vreg_is_ref (cfg, vreg))
12627 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12629 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12632 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12635 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12638 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12641 g_assert_not_reached ();
12645 /* Flag as having been used in more than one bb */
12646 vreg_to_bb [vreg] = -1;
12652 /* If a variable is used in only one bblock, convert it into a local vreg */
12653 for (i = 0; i < cfg->num_varinfo; i++) {
12654 MonoInst *var = cfg->varinfo [i];
12655 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12657 switch (var->type) {
12663 #if SIZEOF_REGISTER == 8
12666 #if !defined(TARGET_X86)
12667 /* Enabling this screws up the fp stack on x86 */
12670 if (mono_arch_is_soft_float ())
12673 /* Arguments are implicitly global */
12674 /* Putting R4 vars into registers doesn't work currently */
12675 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12676 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12678 * Make that the variable's liveness interval doesn't contain a call, since
12679 * that would cause the lvreg to be spilled, making the whole optimization
12682 /* This is too slow for JIT compilation */
12684 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12686 int def_index, call_index, ins_index;
12687 gboolean spilled = FALSE;
12692 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12693 const char *spec = INS_INFO (ins->opcode);
12695 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12696 def_index = ins_index;
/*
 * NOTE(review): both arms of this || test SRC1/sreg1 — the second arm is
 * an exact duplicate and presumably should test SRC2/sreg2, otherwise
 * uses through the second source register are never seen.  Looks like a
 * copy-paste bug; confirm against the full source before fixing.
 */
12698 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12699 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12700 if (call_index > def_index) {
12706 if (MONO_IS_CALL (ins))
12707 call_index = ins_index;
12717 if (G_UNLIKELY (cfg->verbose_level > 2))
12718 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* variable demoted back to a plain local vreg */
12719 var->flags |= MONO_INST_IS_DEAD;
12720 cfg->vreg_to_inst [var->dreg] = NULL;
12727 * Compress the varinfo and vars tables so the liveness computation is faster and
12728 * takes up less space.
12731 for (i = 0; i < cfg->num_varinfo; ++i) {
12732 MonoInst *var = cfg->varinfo [i];
12733 if (pos < i && cfg->locals_start == i)
12734 cfg->locals_start = pos;
12735 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* live entry: slide it down to index 'pos' and renumber */
12737 cfg->varinfo [pos] = cfg->varinfo [i];
12738 cfg->varinfo [pos]->inst_c0 = pos;
12739 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12740 cfg->vars [pos].idx = pos;
12741 #if SIZEOF_REGISTER == 4
12742 if (cfg->varinfo [pos]->type == STACK_I8) {
12743 /* Modify the two component vars too */
12746 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12747 var1->inst_c0 = pos;
12748 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12749 var1->inst_c0 = pos;
12756 cfg->num_varinfo = pos;
12757 if (cfg->locals_start > cfg->num_varinfo)
12758 cfg->locals_start = cfg->num_varinfo;
12762 * mono_spill_global_vars:
12764 * Generate spill code for variables which are not allocated to registers,
12765 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12766 * code is generated which could be optimized by the local optimization passes.
12769 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12771 MonoBasicBlock *bb;
12773 int orig_next_vreg;
12774 guint32 *vreg_to_lvreg;
12776 guint32 i, lvregs_len;
12777 gboolean dest_has_lvreg = FALSE;
12778 guint32 stacktypes [128];
12779 MonoInst **live_range_start, **live_range_end;
12780 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12781 int *gsharedvt_vreg_to_idx = NULL;
12783 *need_local_opts = FALSE;
12785 memset (spec2, 0, sizeof (spec2));
12787 /* FIXME: Move this function to mini.c */
12788 stacktypes ['i'] = STACK_PTR;
12789 stacktypes ['l'] = STACK_I8;
12790 stacktypes ['f'] = STACK_R8;
12791 #ifdef MONO_ARCH_SIMD_INTRINSICS
12792 stacktypes ['x'] = STACK_VTYPE;
12795 #if SIZEOF_REGISTER == 4
12796 /* Create MonoInsts for longs */
12797 for (i = 0; i < cfg->num_varinfo; i++) {
12798 MonoInst *ins = cfg->varinfo [i];
12800 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12801 switch (ins->type) {
12806 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12809 g_assert (ins->opcode == OP_REGOFFSET);
12811 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12813 tree->opcode = OP_REGOFFSET;
12814 tree->inst_basereg = ins->inst_basereg;
12815 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12817 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12819 tree->opcode = OP_REGOFFSET;
12820 tree->inst_basereg = ins->inst_basereg;
12821 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12831 if (cfg->compute_gc_maps) {
12832 /* registers need liveness info even for !non refs */
12833 for (i = 0; i < cfg->num_varinfo; i++) {
12834 MonoInst *ins = cfg->varinfo [i];
12836 if (ins->opcode == OP_REGVAR)
12837 ins->flags |= MONO_INST_GC_TRACK;
12841 if (cfg->gsharedvt) {
12842 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
12844 for (i = 0; i < cfg->num_varinfo; ++i) {
12845 MonoInst *ins = cfg->varinfo [i];
12848 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12849 if (i >= cfg->locals_start) {
12851 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12852 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12853 ins->opcode = OP_GSHAREDVT_LOCAL;
12854 ins->inst_imm = idx;
12857 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12858 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12864 /* FIXME: widening and truncation */
12867 * As an optimization, when a variable allocated to the stack is first loaded into
12868 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12869 * the variable again.
12871 orig_next_vreg = cfg->next_vreg;
12872 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12873 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12877 * These arrays contain the first and last instructions accessing a given
12879 * Since we emit bblocks in the same order we process them here, and we
12880 * don't split live ranges, these will precisely describe the live range of
12881 * the variable, i.e. the instruction range where a valid value can be found
12882 * in the variables location.
12883 * The live range is computed using the liveness info computed by the liveness pass.
12884 * We can't use vmv->range, since that is an abstract live range, and we need
12885 * one which is instruction precise.
12886 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12888 /* FIXME: Only do this if debugging info is requested */
12889 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12890 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12891 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12892 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12894 /* Add spill loads/stores */
12895 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12898 if (cfg->verbose_level > 2)
12899 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12901 /* Clear vreg_to_lvreg array */
12902 for (i = 0; i < lvregs_len; i++)
12903 vreg_to_lvreg [lvregs [i]] = 0;
12907 MONO_BB_FOR_EACH_INS (bb, ins) {
12908 const char *spec = INS_INFO (ins->opcode);
12909 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12910 gboolean store, no_lvreg;
12911 int sregs [MONO_MAX_SRC_REGS];
12913 if (G_UNLIKELY (cfg->verbose_level > 2))
12914 mono_print_ins (ins);
12916 if (ins->opcode == OP_NOP)
12920 * We handle LDADDR here as well, since it can only be decomposed
12921 * when variable addresses are known.
12923 if (ins->opcode == OP_LDADDR) {
12924 MonoInst *var = ins->inst_p0;
12926 if (var->opcode == OP_VTARG_ADDR) {
12927 /* Happens on SPARC/S390 where vtypes are passed by reference */
12928 MonoInst *vtaddr = var->inst_left;
12929 if (vtaddr->opcode == OP_REGVAR) {
12930 ins->opcode = OP_MOVE;
12931 ins->sreg1 = vtaddr->dreg;
12933 else if (var->inst_left->opcode == OP_REGOFFSET) {
12934 ins->opcode = OP_LOAD_MEMBASE;
12935 ins->inst_basereg = vtaddr->inst_basereg;
12936 ins->inst_offset = vtaddr->inst_offset;
12939 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12940 /* gsharedvt arg passed by ref */
12941 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12943 ins->opcode = OP_LOAD_MEMBASE;
12944 ins->inst_basereg = var->inst_basereg;
12945 ins->inst_offset = var->inst_offset;
12946 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12947 MonoInst *load, *load2, *load3;
12948 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12949 int reg1, reg2, reg3;
12950 MonoInst *info_var = cfg->gsharedvt_info_var;
12951 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12955 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12958 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
12960 g_assert (info_var);
12961 g_assert (locals_var);
12963 /* Mark the instruction used to compute the locals var as used */
12964 cfg->gsharedvt_locals_var_ins = NULL;
12966 /* Load the offset */
12967 if (info_var->opcode == OP_REGOFFSET) {
12968 reg1 = alloc_ireg (cfg);
12969 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
12970 } else if (info_var->opcode == OP_REGVAR) {
12972 reg1 = info_var->dreg;
12974 g_assert_not_reached ();
12976 reg2 = alloc_ireg (cfg);
12977 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
12978 /* Load the locals area address */
12979 reg3 = alloc_ireg (cfg);
12980 if (locals_var->opcode == OP_REGOFFSET) {
12981 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
12982 } else if (locals_var->opcode == OP_REGVAR) {
12983 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
12985 g_assert_not_reached ();
12987 /* Compute the address */
12988 ins->opcode = OP_PADD;
12992 mono_bblock_insert_before_ins (bb, ins, load3);
12993 mono_bblock_insert_before_ins (bb, load3, load2);
12995 mono_bblock_insert_before_ins (bb, load2, load);
12997 g_assert (var->opcode == OP_REGOFFSET);
12999 ins->opcode = OP_ADD_IMM;
13000 ins->sreg1 = var->inst_basereg;
13001 ins->inst_imm = var->inst_offset;
13004 *need_local_opts = TRUE;
13005 spec = INS_INFO (ins->opcode);
13008 if (ins->opcode < MONO_CEE_LAST) {
13009 mono_print_ins (ins);
13010 g_assert_not_reached ();
13014 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13018 if (MONO_IS_STORE_MEMBASE (ins)) {
13019 tmp_reg = ins->dreg;
13020 ins->dreg = ins->sreg2;
13021 ins->sreg2 = tmp_reg;
13024 spec2 [MONO_INST_DEST] = ' ';
13025 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13026 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13027 spec2 [MONO_INST_SRC3] = ' ';
13029 } else if (MONO_IS_STORE_MEMINDEX (ins))
13030 g_assert_not_reached ();
13035 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13036 printf ("\t %.3s %d", spec, ins->dreg);
13037 num_sregs = mono_inst_get_src_registers (ins, sregs);
13038 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13039 printf (" %d", sregs [srcindex]);
13046 regtype = spec [MONO_INST_DEST];
13047 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13050 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13051 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13052 MonoInst *store_ins;
13054 MonoInst *def_ins = ins;
13055 int dreg = ins->dreg; /* The original vreg */
13057 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13059 if (var->opcode == OP_REGVAR) {
13060 ins->dreg = var->dreg;
13061 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13063 * Instead of emitting a load+store, use a _membase opcode.
13065 g_assert (var->opcode == OP_REGOFFSET);
13066 if (ins->opcode == OP_MOVE) {
13070 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13071 ins->inst_basereg = var->inst_basereg;
13072 ins->inst_offset = var->inst_offset;
13075 spec = INS_INFO (ins->opcode);
13079 g_assert (var->opcode == OP_REGOFFSET);
13081 prev_dreg = ins->dreg;
13083 /* Invalidate any previous lvreg for this vreg */
13084 vreg_to_lvreg [ins->dreg] = 0;
13088 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13090 store_opcode = OP_STOREI8_MEMBASE_REG;
13093 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13095 #if SIZEOF_REGISTER != 8
13096 if (regtype == 'l') {
13097 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13098 mono_bblock_insert_after_ins (bb, ins, store_ins);
13099 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13100 mono_bblock_insert_after_ins (bb, ins, store_ins);
13101 def_ins = store_ins;
13106 g_assert (store_opcode != OP_STOREV_MEMBASE);
13108 /* Try to fuse the store into the instruction itself */
13109 /* FIXME: Add more instructions */
13110 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13111 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13112 ins->inst_imm = ins->inst_c0;
13113 ins->inst_destbasereg = var->inst_basereg;
13114 ins->inst_offset = var->inst_offset;
13115 spec = INS_INFO (ins->opcode);
13116 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13117 ins->opcode = store_opcode;
13118 ins->inst_destbasereg = var->inst_basereg;
13119 ins->inst_offset = var->inst_offset;
13123 tmp_reg = ins->dreg;
13124 ins->dreg = ins->sreg2;
13125 ins->sreg2 = tmp_reg;
13128 spec2 [MONO_INST_DEST] = ' ';
13129 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13130 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13131 spec2 [MONO_INST_SRC3] = ' ';
13133 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13134 // FIXME: The backends expect the base reg to be in inst_basereg
13135 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13137 ins->inst_basereg = var->inst_basereg;
13138 ins->inst_offset = var->inst_offset;
13139 spec = INS_INFO (ins->opcode);
13141 /* printf ("INS: "); mono_print_ins (ins); */
13142 /* Create a store instruction */
13143 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13145 /* Insert it after the instruction */
13146 mono_bblock_insert_after_ins (bb, ins, store_ins);
13148 def_ins = store_ins;
13151 * We can't assign ins->dreg to var->dreg here, since the
13152 * sregs could use it. So set a flag, and do it after
13155 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13156 dest_has_lvreg = TRUE;
13161 if (def_ins && !live_range_start [dreg]) {
13162 live_range_start [dreg] = def_ins;
13163 live_range_start_bb [dreg] = bb;
13166 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13169 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13170 tmp->inst_c1 = dreg;
13171 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13178 num_sregs = mono_inst_get_src_registers (ins, sregs);
13179 for (srcindex = 0; srcindex < 3; ++srcindex) {
13180 regtype = spec [MONO_INST_SRC1 + srcindex];
13181 sreg = sregs [srcindex];
13183 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13184 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13185 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13186 MonoInst *use_ins = ins;
13187 MonoInst *load_ins;
13188 guint32 load_opcode;
13190 if (var->opcode == OP_REGVAR) {
13191 sregs [srcindex] = var->dreg;
13192 //mono_inst_set_src_registers (ins, sregs);
13193 live_range_end [sreg] = use_ins;
13194 live_range_end_bb [sreg] = bb;
13196 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13199 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13200 /* var->dreg is a hreg */
13201 tmp->inst_c1 = sreg;
13202 mono_bblock_insert_after_ins (bb, ins, tmp);
13208 g_assert (var->opcode == OP_REGOFFSET);
13210 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13212 g_assert (load_opcode != OP_LOADV_MEMBASE);
13214 if (vreg_to_lvreg [sreg]) {
13215 g_assert (vreg_to_lvreg [sreg] != -1);
13217 /* The variable is already loaded to an lvreg */
13218 if (G_UNLIKELY (cfg->verbose_level > 2))
13219 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13220 sregs [srcindex] = vreg_to_lvreg [sreg];
13221 //mono_inst_set_src_registers (ins, sregs);
13225 /* Try to fuse the load into the instruction */
13226 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13227 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13228 sregs [0] = var->inst_basereg;
13229 //mono_inst_set_src_registers (ins, sregs);
13230 ins->inst_offset = var->inst_offset;
13231 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13232 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13233 sregs [1] = var->inst_basereg;
13234 //mono_inst_set_src_registers (ins, sregs);
13235 ins->inst_offset = var->inst_offset;
13237 if (MONO_IS_REAL_MOVE (ins)) {
13238 ins->opcode = OP_NOP;
13241 //printf ("%d ", srcindex); mono_print_ins (ins);
13243 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13245 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13246 if (var->dreg == prev_dreg) {
13248 * sreg refers to the value loaded by the load
13249 * emitted below, but we need to use ins->dreg
13250 * since it refers to the store emitted earlier.
13254 g_assert (sreg != -1);
13255 vreg_to_lvreg [var->dreg] = sreg;
13256 g_assert (lvregs_len < 1024);
13257 lvregs [lvregs_len ++] = var->dreg;
13261 sregs [srcindex] = sreg;
13262 //mono_inst_set_src_registers (ins, sregs);
13264 #if SIZEOF_REGISTER != 8
13265 if (regtype == 'l') {
13266 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13267 mono_bblock_insert_before_ins (bb, ins, load_ins);
13268 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13269 mono_bblock_insert_before_ins (bb, ins, load_ins);
13270 use_ins = load_ins;
13275 #if SIZEOF_REGISTER == 4
13276 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13278 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13279 mono_bblock_insert_before_ins (bb, ins, load_ins);
13280 use_ins = load_ins;
13284 if (var->dreg < orig_next_vreg) {
13285 live_range_end [var->dreg] = use_ins;
13286 live_range_end_bb [var->dreg] = bb;
13289 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13292 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13293 tmp->inst_c1 = var->dreg;
13294 mono_bblock_insert_after_ins (bb, ins, tmp);
13298 mono_inst_set_src_registers (ins, sregs);
13300 if (dest_has_lvreg) {
13301 g_assert (ins->dreg != -1);
13302 vreg_to_lvreg [prev_dreg] = ins->dreg;
13303 g_assert (lvregs_len < 1024);
13304 lvregs [lvregs_len ++] = prev_dreg;
13305 dest_has_lvreg = FALSE;
13309 tmp_reg = ins->dreg;
13310 ins->dreg = ins->sreg2;
13311 ins->sreg2 = tmp_reg;
13314 if (MONO_IS_CALL (ins)) {
13315 /* Clear vreg_to_lvreg array */
13316 for (i = 0; i < lvregs_len; i++)
13317 vreg_to_lvreg [lvregs [i]] = 0;
13319 } else if (ins->opcode == OP_NOP) {
13321 MONO_INST_NULLIFY_SREGS (ins);
13324 if (cfg->verbose_level > 2)
13325 mono_print_ins_index (1, ins);
13328 /* Extend the live range based on the liveness info */
13329 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13330 for (i = 0; i < cfg->num_varinfo; i ++) {
13331 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13333 if (vreg_is_volatile (cfg, vi->vreg))
13334 /* The liveness info is incomplete */
13337 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13338 /* Live from at least the first ins of this bb */
13339 live_range_start [vi->vreg] = bb->code;
13340 live_range_start_bb [vi->vreg] = bb;
13343 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13344 /* Live at least until the last ins of this bb */
13345 live_range_end [vi->vreg] = bb->last_ins;
13346 live_range_end_bb [vi->vreg] = bb;
13352 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13354 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13355 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13357 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13358 for (i = 0; i < cfg->num_varinfo; ++i) {
13359 int vreg = MONO_VARINFO (cfg, i)->vreg;
13362 if (live_range_start [vreg]) {
13363 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13365 ins->inst_c1 = vreg;
13366 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13368 if (live_range_end [vreg]) {
13369 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13371 ins->inst_c1 = vreg;
13372 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13373 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13375 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13381 if (cfg->gsharedvt_locals_var_ins) {
13382 /* Nullify if unused */
13383 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13384 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13387 g_free (live_range_start);
13388 g_free (live_range_end);
13389 g_free (live_range_start_bb);
13390 g_free (live_range_end_bb);
13395 * - use 'iadd' instead of 'int_add'
13396 * - handling ovf opcodes: decompose in method_to_ir.
13397 * - unify iregs/fregs
13398 * -> partly done, the missing parts are:
13399 * - a more complete unification would involve unifying the hregs as well, so
13400 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13401 * would no longer map to the machine hregs, so the code generators would need to
13402 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13403 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13404 * fp/non-fp branches speeds it up by about 15%.
13405 * - use sext/zext opcodes instead of shifts
13407 * - get rid of TEMPLOADs if possible and use vregs instead
13408 * - clean up usage of OP_P/OP_ opcodes
13409 * - cleanup usage of DUMMY_USE
13410 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13412 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13413 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13414 * - make sure handle_stack_args () is called before the branch is emitted
13415 * - when the new IR is done, get rid of all unused stuff
13416 * - COMPARE/BEQ as separate instructions or unify them ?
13417 * - keeping them separate allows specialized compare instructions like
13418 * compare_imm, compare_membase
13419 * - most back ends unify fp compare+branch, fp compare+ceq
13420 * - integrate mono_save_args into inline_method
13421 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13422 * - handle long shift opts on 32 bit platforms somehow: they require
13423 * 3 sregs (2 for arg1 and 1 for arg2)
13424 * - make byref a 'normal' type.
13425 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13426 * variable if needed.
13427 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13428 * like inline_method.
13429 * - remove inlining restrictions
13430 * - fix LNEG and enable cfold of INEG
13431 * - generalize x86 optimizations like ldelema as a peephole optimization
13432 * - add store_mem_imm for amd64
13433 * - optimize the loading of the interruption flag in the managed->native wrappers
13434 * - avoid special handling of OP_NOP in passes
13435 * - move code inserting instructions into one function/macro.
13436 * - try a coalescing phase after liveness analysis
13437 * - add float -> vreg conversion + local optimizations on !x86
13438 * - figure out how to handle decomposed branches during optimizations, ie.
13439 * compare+branch, op_jump_table+op_br etc.
13440 * - promote RuntimeXHandles to vregs
13441 * - vtype cleanups:
13442 * - add a NEW_VARLOADA_VREG macro
13443 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13444 * accessing vtype fields.
13445 * - get rid of I8CONST on 64 bit platforms
13446 * - dealing with the increase in code size due to branches created during opcode
13448 * - use extended basic blocks
13449 * - all parts of the JIT
13450 * - handle_global_vregs () && local regalloc
13451 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13452 * - sources of increase in code size:
13455 * - isinst and castclass
13456 * - lvregs not allocated to global registers even if used multiple times
13457 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13459 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13460 * - add all micro optimizations from the old JIT
13461 * - put tree optimizations into the deadce pass
13462 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13463 * specific function.
13464 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13465 * fcompare + branchCC.
13466 * - create a helper function for allocating a stack slot, taking into account
13467 * MONO_CFG_HAS_SPILLUP.
13469 * - merge the ia64 switch changes.
13470 * - optimize mono_regstate2_alloc_int/float.
13471 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13472 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13473 * parts of the tree could be separated by other instructions, killing the tree
13474 * arguments, or stores killing loads etc. Also, should we fold loads into other
13475 * instructions if the result of the load is used multiple times ?
13476 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13477 * - LAST MERGE: 108395.
13478 * - when returning vtypes in registers, generate IR and append it to the end of the
13479 * last bb instead of doing it in the epilog.
13480 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13488 - When to decompose opcodes:
13489 - earlier: this makes some optimizations hard to implement, since the low level IR
13490 no longer contains the necessary information. But it is easier to do.
13491 - later: harder to implement, enables more optimizations.
13492 - Branches inside bblocks:
13493 - created when decomposing complex opcodes.
13494 - branches to another bblock: harmless, but not tracked by the branch
13495 optimizations, so need to branch to a label at the start of the bblock.
13496 - branches to inside the same bblock: very problematic, trips up the local
13497 reg allocator. Can be fixed by splitting the current bblock, but that is a
13498 complex operation, since some local vregs can become global vregs etc.
13499 - Local/global vregs:
13500 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13501 local register allocator.
13502 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13503 structure, created by mono_create_var (). Assigned to hregs or the stack by
13504 the global register allocator.
13505 - When to do optimizations like alu->alu_imm:
13506 - earlier -> saves work later on since the IR will be smaller/simpler
13507 - later -> can work on more instructions
13508 - Handling of valuetypes:
13509 - When a vtype is pushed on the stack, a new temporary is created, an
13510 instruction computing its address (LDADDR) is emitted and pushed on
13511 the stack. Need to optimize cases when the vtype is used immediately as in
13512 argument passing, stloc etc.
13513 - Instead of the to_end stuff in the old JIT, simply call the function handling
13514 the values on the stack before emitting the last instruction of the bb.
13517 #endif /* DISABLE_JIT */