2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Cost model / limits used by the inliner. */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining of the current callee and fall back to a normal call
 * (only when actually inlining, i.e. cfg->method != method). */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* Bail out of IR generation if a previous step recorded an exception on cfg. */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException on cfg and abort compilation. */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* Record a FieldAccessException on cfg and abort compilation. */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/* Give up generic code sharing for this method/opcode combination;
 * only active when compiling with a generic sharing context. */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* Same as above but for gsharedvt (valuetype generic sharing) compilation. */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* Record an OutOfMemoryException on cfg and abort compilation. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
/* Mark the method as non-AOT-able (forces a JIT fallback at runtime). */
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
130 /* Determine whether 'ins' represents a load of the 'this' argument */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in this file. */
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
/* Cached signatures for runtime trampolines; filled in once by
 * mono_create_helper_signatures () below. */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
/* The MINI_OP/MINI_OP3 macros are redefined before each inclusion of
 * mini-ops.h so the same opcode list expands into different per-opcode
 * metadata tables. */
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
/* Second expansion: number of source registers per opcode. */
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
/* Initialize liveness-range bookkeeping for a variable's MonoMethodVar. */
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy up to three source registers from REGS into INS. */
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
/* Public wrappers around the internal virtual-register allocators. */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
/* Allocate a destination vreg of the kind matching STACK_TYPE. */
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
/* Map a MonoType to the move opcode used to copy a value of that type
 * between registers (OP_MOVE/OP_FMOVE/etc. — exact returns elided in
 * this view). Enums and generic instances are resolved to their
 * underlying representation first. */
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_replace_type (type);
275 switch (type->type) {
278 case MONO_TYPE_BOOLEAN:
290 case MONO_TYPE_FNPTR:
292 case MONO_TYPE_CLASS:
293 case MONO_TYPE_STRING:
294 case MONO_TYPE_OBJECT:
295 case MONO_TYPE_SZARRAY:
296 case MONO_TYPE_ARRAY:
/* 64-bit registers: I8/U8 fit in a plain integer move. */
300 #if SIZEOF_REGISTER == 8
309 case MONO_TYPE_VALUETYPE:
/* Enum valuetypes are moved as their underlying integral type. */
310 if (type->data.klass->enumtype) {
311 type = mono_class_enum_basetype (type->data.klass);
314 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
317 case MONO_TYPE_TYPEDBYREF:
/* Non-valuetype generic instances behave like their container class. */
319 case MONO_TYPE_GENERICINST:
320 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur under generic sharing. */
324 g_assert (cfg->generic_sharing_context);
325 if (mini_type_var_is_vt (cfg, type))
330 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print a basic block's in/out edges and its instruction list. */
336 mono_print_bb (MonoBasicBlock *bb, const char *msg)
341 printf ("\n%s %d: [IN: ", msg, bb->block_num);
342 for (i = 0; i < bb->in_count; ++i)
343 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
345 for (i = 0; i < bb->out_count; ++i)
346 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
348 for (tree = bb->code; tree; tree = tree->next)
349 mono_print_ins_index (-1, tree);
/* One-time initialization of the cached trampoline signatures declared above.
 * Signature strings are "<ret> <args...>" icall descriptors. */
353 mono_create_helper_signatures (void)
355 helper_sig_domain_get = mono_create_icall_signature ("ptr");
356 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
357 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
358 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
359 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
360 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
361 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
365 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
366 * foo<T> (int i) { ldarg.0; box T; }
/* Mark the method unverifiable. Under gsharedvt this instead records a
 * sharing failure so the runtime falls back to a concrete instantiation. */
368 #define UNVERIFIED do { \
369 if (cfg->gsharedvt) { \
370 if (cfg->verbose_level > 2) \
371 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
372 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
373 goto exception_exit; \
375 if (mini_get_debug_options ()->break_on_unverified) \
/* Abort on a member/type load failure (optionally trapping into the debugger). */
381 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
383 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Look up (or create and register) the basic block starting at IL address IP. */
385 #define GET_BBLOCK(cfg,tblock,ip) do { \
386 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
388 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
389 NEW_BBLOCK (cfg, (tblock)); \
390 (tblock)->cil_code = (ip); \
391 ADD_BBLOCK (cfg, (tblock)); \
395 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86/amd64 LEA: dest = sr1 + (sr2 << shift) + imm. */
396 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
397 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
398 (dest)->dreg = alloc_ireg_mp ((cfg)); \
399 (dest)->sreg1 = (sr1); \
400 (dest)->sreg2 = (sr2); \
401 (dest)->inst_imm = (imm); \
402 (dest)->backend.shift_amount = (shift); \
403 MONO_ADD_INS ((cfg)->cbb, (dest)); \
407 #if SIZEOF_REGISTER == 8
/* On 64-bit targets, sign-extend an I4 operand mixed with a PTR operand
 * so the binop sees two native-width values. No-op on 32-bit. */
408 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
409 /* FIXME: Need to add many more cases */ \
410 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
412 int dr = alloc_preg (cfg); \
413 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
414 (ins)->sreg2 = widen->dreg; \
418 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values off the eval stack, emit the typed binary op, push result. */
421 #define ADD_BINOP(op) do { \
422 MONO_INST_NEW (cfg, ins, (op)); \
424 ins->sreg1 = sp [0]->dreg; \
425 ins->sreg2 = sp [1]->dreg; \
426 type_from_op (ins, sp [0], sp [1]); \
428 /* Have to insert a widening op */ \
429 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
430 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
431 MONO_ADD_INS ((cfg)->cbb, (ins)); \
432 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Pop one value, emit the typed unary op, push result. */
435 #define ADD_UNOP(op) do { \
436 MONO_INST_NEW (cfg, ins, (op)); \
438 ins->sreg1 = sp [0]->dreg; \
439 type_from_op (ins, sp [0], NULL); \
441 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
442 MONO_ADD_INS ((cfg)->cbb, (ins)); \
443 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a two-way conditional branch: compare the top two stack values,
 * link the true/false target bblocks, and flush the eval stack. */
446 #define ADD_BINCOND(next_block) do { \
449 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
450 cmp->sreg1 = sp [0]->dreg; \
451 cmp->sreg2 = sp [1]->dreg; \
452 type_from_op (cmp, sp [0], sp [1]); \
454 type_from_op (ins, sp [0], sp [1]); \
455 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
456 GET_BBLOCK (cfg, tblock, target); \
457 link_bblock (cfg, bblock, tblock); \
458 ins->inst_true_bb = tblock; \
459 if ((next_block)) { \
460 link_bblock (cfg, bblock, (next_block)); \
461 ins->inst_false_bb = (next_block); \
462 start_new_bblock = 1; \
464 GET_BBLOCK (cfg, tblock, ip); \
465 link_bblock (cfg, bblock, tblock); \
466 ins->inst_false_bb = tblock; \
467 start_new_bblock = 2; \
/* Spill any values still on the eval stack before leaving the bblock. */
469 if (sp != stack_start) { \
470 handle_stack_args (cfg, stack_start, sp - stack_start); \
471 CHECK_UNVERIFIABLE (cfg); \
473 MONO_ADD_INS (bblock, cmp); \
474 MONO_ADD_INS (bblock, ins); \
478 * link_bblock: Links two basic blocks
480 * links two basic blocks in the control flow graph, the 'from'
481 * argument is the starting block and the 'to' argument is the block
482 * control flow transfers to after 'from'.
/* Adds the edge to both from->out_bb and to->in_bb, skipping duplicates.
 * Edge arrays are grown by reallocating from the compile mempool. */
485 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
487 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit are printed specially). */
491 if (from->cil_code) {
493 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
495 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
498 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
500 printf ("edge from entry to exit\n");
/* Already linked? Then the out-edge need not be added again. */
505 for (i = 0; i < from->out_count; ++i) {
506 if (to == from->out_bb [i]) {
512 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
513 for (i = 0; i < from->out_count; ++i) {
514 newa [i] = from->out_bb [i];
/* Mirror the same duplicate-check-and-grow logic for to->in_bb. */
522 for (i = 0; i < to->in_count; ++i) {
523 if (from == to->in_bb [i]) {
529 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
530 for (i = 0; i < to->in_count; ++i) {
531 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
540 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 link_bblock (cfg, from, to);
546 * mono_find_block_region:
548 * We mark each basic block with a region ID. We use that to avoid BB
549 * optimizations when blocks are in different regions.
552 * A region token that encodes where this region is, and information
553 * about the clause owner for this block.
555 * The region encodes the try/catch/filter clause that owns this block
556 * as well as the type. -1 is a special value that represents a block
557 * that is in none of try/catch/filter.
/* Token layout: ((clause_index + 1) << 8) | region_kind | clause_flags. */
560 mono_find_block_region (MonoCompile *cfg, int offset)
562 MonoMethodHeader *header = cfg->header;
563 MonoExceptionClause *clause;
566 for (i = 0; i < header->num_clauses; ++i) {
567 clause = &header->clauses [i];
/* Inside a filter expression (between filter_offset and handler_offset)? */
568 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
569 (offset < (clause->handler_offset)))
570 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside a handler body: finally / fault / catch. */
572 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
573 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
574 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
575 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
576 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
578 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
581 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
582 return ((i + 1) << 8) | clause->flags;
/* Collect the exception clauses of kind TYPE whose protected range is
 * exited by a branch from IP to TARGET (e.g. the finally blocks a leave
 * instruction must run). Returns a GList of MonoExceptionClause*. */
589 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
591 MonoMethodHeader *header = cfg->header;
592 MonoExceptionClause *clause;
596 for (i = 0; i < header->num_clauses; ++i) {
597 clause = &header->clauses [i];
/* Clause is left when IP is inside it but TARGET is not. */
598 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
599 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
600 if (clause->flags == type)
601 res = g_list_append (res, clause);
/* Get or create the stack-pointer save variable for an EH region,
 * memoized in cfg->spvars keyed by region token. */
608 mono_create_spvar_for_region (MonoCompile *cfg, int region)
612 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
616 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
617 /* prevent it from being register allocated */
618 var->flags |= MONO_INST_VOLATILE;
620 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for a handler at OFFSET (may be NULL). */
624 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
626 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or create the exception-object variable for a handler at OFFSET,
 * memoized in cfg->exvars. */
630 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
634 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
638 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
639 /* prevent it from being register allocated */
640 var->flags |= MONO_INST_VOLATILE;
642 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_* kind) and inst->klass for a load of @type. */
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
656 type = mini_replace_type (type);
657 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack. */
659 inst->type = STACK_MP;
664 switch (type->type) {
666 inst->type = STACK_INV;
670 case MONO_TYPE_BOOLEAN:
676 inst->type = STACK_I4;
681 case MONO_TYPE_FNPTR:
682 inst->type = STACK_PTR;
684 case MONO_TYPE_CLASS:
685 case MONO_TYPE_STRING:
686 case MONO_TYPE_OBJECT:
687 case MONO_TYPE_SZARRAY:
688 case MONO_TYPE_ARRAY:
689 inst->type = STACK_OBJ;
693 inst->type = STACK_I8;
697 inst->type = STACK_R8;
699 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying integral type. */
700 if (type->data.klass->enumtype) {
701 type = mono_class_enum_basetype (type->data.klass);
705 inst->type = STACK_VTYPE;
708 case MONO_TYPE_TYPEDBYREF:
709 inst->klass = mono_defaults.typed_reference_class;
710 inst->type = STACK_VTYPE;
/* Non-valuetype generic instances behave like their container class. */
712 case MONO_TYPE_GENERICINST:
713 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: VTYPE under gsharedvt, otherwise treated as objects. */
717 g_assert (cfg->generic_sharing_context);
718 if (mini_is_gsharedvt_type (cfg, type)) {
719 g_assert (cfg->gsharedvt);
720 inst->type = STACK_VTYPE;
722 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result type of arithmetic binops, indexed [src1->type][src2->type];
 * STACK_INV marks an invalid operand combination. */
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type. */
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...). */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero encodes allowed/unsafe combos. */
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
/* Result type of shift ops: shifted value's type, count must be I4/PTR. */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
/* Each entry is the delta added to the generic CEE_/OP_ opcode to obtain
 * the typed variant (I/L/P/F) for the given stack type. */
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
/* Also specializes ins->opcode to the typed variant via the *_op_map tables.
 * src2 may be NULL for unary opcodes. */
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, typed opcode delta. */
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the shifted operand. */
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
/* Compare: pick L/F/I variant by the operand width. */
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
897 ins->opcode += beqops_op_map [src1->type];
/* ceq: validity from bin_comp_table. */
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt family: only the strictly-valid (==1) combinations allowed. */
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
/* neg */
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
/* not: valid on integral/pointer stack types only. */
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
/* conv to small integral types -> I4 result. */
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned-to-float conversion. */
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to <= 32-bit targets. */
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: native-unsigned conversion, source-type dependent. */
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_ICONV_TO_U;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit targets. */
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
/* conv to floating point. */
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
/* conv to I4/U4 (overflow-mapped variants). */
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
/* Load-from-memory opcodes: result type fixed by the opcode itself. */
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a conservative klass for GC tracking. */
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
/* Map from a type index to its eval-stack kind (row of a lookup table). */
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the values in ARGS are compatible with SIG's parameters
 * (byref-ness and basic type class checks). NOTE(review): most of this
 * function's branches are elided in this view. */
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
/* R8 stack values must match a floating-point parameter. */
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily created; only meaningful on MONO_ARCH_NEED_GOT_VAR targets
 * when compiling AOT. */
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
/* Lazily create the variable holding the rgctx/vtable argument used
 * under generic sharing. */
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1169 return cfg->rgctx_var;
/* Map an eval-stack kind back to a representative MonoType* (used when
 * creating spill variables for stack slots). */
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* managed pointer: use the byref form of the pointed-to class. */
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse of the above: map a MonoType to its eval-stack kind
 * (returns a STACK_* value; most return statements elided in this view). */
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 t = mono_type_get_underlying_type (t);
1196 case MONO_TYPE_BOOLEAN:
1199 case MONO_TYPE_CHAR:
1206 case MONO_TYPE_FNPTR:
1208 case MONO_TYPE_CLASS:
1209 case MONO_TYPE_STRING:
1210 case MONO_TYPE_OBJECT:
1211 case MONO_TYPE_SZARRAY:
1212 case MONO_TYPE_ARRAY:
1220 case MONO_TYPE_VALUETYPE:
1221 case MONO_TYPE_TYPEDBYREF:
/* Generic instances: valuetype instantiations are VTYPE, others objects. */
1223 case MONO_TYPE_GENERICINST:
1224 if (mono_type_generic_inst_is_valuetype (t))
1230 g_assert_not_reached ();
/* Return the element class accessed by a ldelem/stelem CIL opcode. */
1237 array_access_to_klass (int opcode)
1241 return mono_defaults.byte_class;
1243 return mono_defaults.uint16_class;
1246 return mono_defaults.int_class;
1249 return mono_defaults.sbyte_class;
1252 return mono_defaults.int16_class;
1255 return mono_defaults.int32_class;
1257 return mono_defaults.uint32_class;
1260 return mono_defaults.int64_class;
1263 return mono_defaults.single_class;
1266 return mono_defaults.double_class;
1267 case CEE_LDELEM_REF:
1268 case CEE_STELEM_REF:
1269 return mono_defaults.object_class;
1271 g_assert_not_reached ();
1277 * We try to share variables when possible
/* Get a spill variable for eval-stack SLOT holding a value like INS.
 * Integer-kind slots are memoized in cfg->intvars (keyed by slot+type)
 * so the same local is reused across bblocks. */
1280 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1285 /* inlining can result in deeper stacks */
1286 if (slot >= cfg->header->max_stack)
1287 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1289 pos = ins->type - 1 + slot * STACK_MAX;
1291 switch (ins->type) {
1298 if ((vnum = cfg->intvars [pos]))
1299 return cfg->varinfo [vnum];
1300 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1301 cfg->intvars [pos] = res->inst_c0;
/* Non-shareable kinds always get a fresh variable. */
1304 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Remember the image+token that produced KEY so the AOT compiler can
 * re-resolve it later; recorded in cfg->token_info_hash. */
1310 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1313 * Don't use this if a generic_context is set, since that means AOT can't
1314 * look up the method using just the image+token.
1315 * table == 0 means this is a reference made from a wrapper.
1317 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1318 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1319 jump_info_token->image = image;
1320 jump_info_token->token = token;
1321 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1326 * This function is called to handle items that are left on the evaluation stack
1327 * at basic block boundaries. What happens is that we save the values to local variables
1328 * and we reload them later when first entering the target basic block (with the
1329 * handle_loaded_temps () function).
1330 * A single joint point will use the same variables (stored in the array bb->out_stack or
1331 * bb->in_stack, if the basic block is before or after the joint point).
1333 * This function needs to be called _before_ emitting the last instruction of
1334 * the bb (i.e. before emitting a branch).
1335 * If the stack merge fails at a join point, cfg->unverifiable is set.
1338 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1341 MonoBasicBlock *bb = cfg->cbb;
1342 MonoBasicBlock *outb;
1343 MonoInst *inst, **locals;
1348 if (cfg->verbose_level > 3)
1349 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables carry the stack values. */
1350 if (!bb->out_scount) {
1351 bb->out_scount = count;
1352 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor, so both sides agree. */
1354 for (i = 0; i < bb->out_count; ++i) {
1355 outb = bb->out_bb [i];
1356 /* exception handlers are linked, but they should not be considered for stack args */
1357 if (outb->flags & BB_EXCEPTION_HANDLER)
1359 //printf (" %d", outb->block_num);
1360 if (outb->in_stack) {
1362 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate fresh out_stack variables. */
1368 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1369 for (i = 0; i < count; ++i) {
1371 * try to reuse temps already allocated for this purpouse, if they occupy the same
1372 * stack slot and if they are of the same type.
1373 * This won't cause conflicts since if 'local' is used to
1374 * store one of the values in the in_stack of a bblock, then
1375 * the same variable will be used for the same outgoing stack
1377 * This doesn't work when inlining methods, since the bblocks
1378 * in the inlined methods do not inherit their in_stack from
1379 * the bblock they are inlined to. See bug #58863 for an
1382 if (cfg->inlined_method)
1383 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1385 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out_stack to successors as their in_stack, checking
 * that already-assigned successors agree on the stack depth. */
1390 for (i = 0; i < bb->out_count; ++i) {
1391 outb = bb->out_bb [i];
1392 /* exception handlers are linked, but they should not be considered for stack args */
1393 if (outb->flags & BB_EXCEPTION_HANDLER)
1395 if (outb->in_scount) {
1396 if (outb->in_scount != bb->out_scount) {
/* Depth mismatch at a join point: IL is unverifiable. */
1397 cfg->unverifiable = TRUE;
1400 continue; /* check they are the same locals */
1402 outb->in_scount = count;
1403 outb->in_stack = bb->out_stack;
1406 locals = bb->out_stack;
/* Spill each stack value into its carrier variable and replace the stack
 * entry with a reference to that variable. */
1408 for (i = 0; i < count; ++i) {
1409 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1410 inst->cil_code = sp [i]->cil_code;
1411 sp [i] = locals [i];
1412 if (cfg->verbose_level > 3)
1413 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1417 * It is possible that the out bblocks already have in_stack assigned, and
1418 * the in_stacks differ. In this case, we will store to all the different
1425 /* Find a bblock which has a different in_stack */
1427 while (bindex < bb->out_count) {
1428 outb = bb->out_bb [bindex];
1429 /* exception handlers are linked, but they should not be considered for stack args */
1430 if (outb->flags & BB_EXCEPTION_HANDLER) {
1434 if (outb->in_stack != locals) {
/* Store the same values into this successor's distinct variable set too. */
1435 for (i = 0; i < count; ++i) {
1436 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1437 inst->cil_code = sp [i]->cil_code;
1438 sp [i] = locals [i];
1439 if (cfg->verbose_level > 3)
1440 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1442 locals = outb->in_stack;
1451 /* Emit code which loads interface_offsets [klass->interface_id]
1452 * The array is stored in memory before vtable.
1455 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* AOT: interface_id is not known at compile time, load it via a patch
 * (ADJUSTED_IID) and index relative to the vtable pointer. */
1457 if (cfg->compile_aot) {
1458 int ioffset_reg = alloc_preg (cfg);
1459 int iid_reg = alloc_preg (cfg);
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1462 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the offsets array sits at negative offsets from the vtable. */
1466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that sets intf_bit_reg to a nonzero value iff the interface
 * bitmap found at base_reg+offset has the bit for klass->interface_id set.
 * Handles the compressed-bitmap, AOT and plain-JIT variants.
 */
1471 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1473 int ibitmap_reg = alloc_preg (cfg);
1474 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the bit test to an icall. */
1476 MonoInst *res, *ins;
1477 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1478 MONO_ADD_INS (cfg->cbb, ins);
1480 if (cfg->compile_aot)
1481 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1483 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1484 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1487 int ibitmap_byte_reg = alloc_preg (cfg);
1489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1491 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at
 * runtime since the interface id is only known via a patch. */
1492 int iid_reg = alloc_preg (cfg);
1493 int shifted_iid_reg = alloc_preg (cfg);
1494 int ibitmap_byte_address_reg = alloc_preg (cfg);
1495 int masked_iid_reg = alloc_preg (cfg);
1496 int iid_one_bit_reg = alloc_preg (cfg);
1497 int iid_bit_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1503 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask are compile-time constants. */
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1514 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1515 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: the bitmap lives at MonoClass.interface_bitmap. */
1518 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1520 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1524 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1525 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: the bitmap lives at MonoVTable.interface_bitmap. */
1528 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1530 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1534 * Emit code which checks whenever the interface id of @klass is smaller than
1535 * than the value given by max_iid_reg.
1538 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1539 MonoBasicBlock *false_target)
/* AOT: the interface id comes from a patch; JIT: it is an immediate. */
1541 if (cfg->compile_aot) {
1542 int iid_reg = alloc_preg (cfg);
1543 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1544 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* On failure either branch to false_target (isinst-style) or throw
 * InvalidCastException (castclass-style) when no target was given. */
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1551 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1554 /* Same as above, but obtains max_iid from a vtable */
1556 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1557 MonoBasicBlock *false_target)
1559 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit unsigned) then delegate. */
1561 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1562 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1565 /* Same as above, but obtains max_iid from a klass */
1567 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1568 MonoBasicBlock *false_target)
1570 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit unsigned) then delegate. */
1572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1573 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subclass test: walk the supertypes table of the
 * class in klass_reg and compare the entry at klass->idepth-1 against
 * klass (as inst, AOT const, or immediate). Branches to true_target on
 * match; the idepth guard branches to false_target when too shallow.
 */
1577 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1579 int idepth_reg = alloc_preg (cfg);
1580 int stypes_reg = alloc_preg (cfg);
1581 int stype = alloc_preg (cfg);
1583 mono_class_setup_supertypes (klass);
/* Only check idepth when it can exceed the fixed-size supertypes table. */
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against a runtime-provided class inst, an AOT class const,
 * or the class pointer as an immediate, in that order of preference. */
1593 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1594 } else if (cfg->compile_aot) {
1595 int const_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subclass test without an explicit class inst. */
1605 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1607 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface type check against the vtable in vtable_reg:
 * max-iid range check, then interface bitmap bit test. With a true_target
 * this behaves like isinst (branch); without one it throws
 * InvalidCastException on failure (castclass).
 */
1611 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 int intf_reg = alloc_preg (cfg);
1615 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1616 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1621 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1625 * Variant of the above that takes a register to the class, not the vtable.
1628 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1630 int intf_bit_reg = alloc_preg (cfg);
/* Same sequence as mini_emit_iface_cast, but using MonoClass fields. */
1632 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1633 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1638 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check of the class in klass_reg against
 * klass (via a runtime inst, an AOT class const, or an immediate),
 * throwing InvalidCastException on mismatch.
 */
1642 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1646 } else if (cfg->compile_aot) {
1647 int const_reg = alloc_preg (cfg);
1648 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1653 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit class inst. */
1657 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1659 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare the class in klass_reg against klass and branch to target with
 * the given branch_op (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing.
 */
1663 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1665 if (cfg->compile_aot) {
1666 int const_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1668 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: needed for the recursive array-element-cast below. */
1676 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check of the class in klass_reg against klass,
 * throwing InvalidCastException on failure. The visible code handles the
 * array case (rank + element-class check, with special cases for enums and
 * System.Object elements, recursing for arrays of arrays) and the plain
 * class case (supertypes-table walk).
 * NOTE(review): branch structure between the two cases is partly elided
 * in this extract.
 */
1679 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1682 int rank_reg = alloc_preg (cfg);
1683 int eclass_reg = alloc_preg (cfg);
1685 g_assert (!klass_inst);
/* Array case: the rank must match exactly. */
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1688 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1689 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class check, special-cased for object/enum element types. */
1691 if (klass->cast_class == mono_defaults.object_class) {
1692 int parent_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1694 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1695 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1696 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1697 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1698 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1699 } else if (klass->cast_class == mono_defaults.enum_class) {
1700 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1701 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1702 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1704 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1705 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1708 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1709 /* Check that the object is a vector too */
/* SZARRAY (vector) targets require MonoArray.bounds == NULL. */
1710 int bounds_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1713 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: walk the supertypes table, as in the isinst variant,
 * but throwing instead of branching on failure. */
1716 int idepth_reg = alloc_preg (cfg);
1717 int stypes_reg = alloc_preg (cfg);
1718 int stype = alloc_preg (cfg);
1720 mono_class_setup_supertypes (klass);
1722 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1725 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1727 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1729 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass without an explicit class inst. */
1734 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1736 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR that fills SIZE bytes at destreg+offset with VAL
 * (asserted to be 0). Small well-aligned sizes use immediate stores; the
 * general path stores a value register in register-sized chunks, then
 * 4/2/1-byte tails. Unaligned wide stores are skipped on
 * NO_UNALIGNED_ACCESS targets.
 */
1740 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zero-fill is supported by this helper. */
1744 g_assert (val == 0);
1749 if ((size <= 4) && (size <= align)) {
/* Fast path: a single immediate store of the exact size. */
1752 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1755 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1760 #if SIZEOF_REGISTER == 8
1762 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register sized to the target word. */
1768 val_reg = alloc_preg (cfg);
1770 if (SIZEOF_REGISTER == 8)
1771 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1773 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1776 /* This could be optimized further if neccesary */
1778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1785 #if !NO_UNALIGNED_ACCESS
1786 if (SIZEOF_REGISTER == 8) {
1788 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: 4-, 2-, then 1-byte stores for the remaining bytes. */
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying SIZE bytes from srcreg+soffset to
 * destreg+doffset using load/store pairs: word-sized chunks where
 * alignment (and NO_UNALIGNED_ACCESS) permit, then 4/2/1-byte tails.
 */
1818 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1825 /*FIXME arbitrary hack to avoid unbound code expansion.*/
/* Fully unrolled copy, so cap the size to bound generated code. */
1826 g_assert (size < 10000);
/* Unaligned source/dest: byte-by-byte copy. */
1829 /* This could be optimized further if neccesary */
1831 cur_reg = alloc_preg (cfg);
1832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1840 #if !NO_UNALIGNED_ACCESS
/* 8-byte chunks on 64-bit targets that tolerate unaligned access. */
1841 if (SIZEOF_REGISTER == 8) {
1843 cur_reg = alloc_preg (cfg);
1844 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1845 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies for the remaining bytes. */
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1870 cur_reg = alloc_preg (cfg);
1871 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR storing sreg1 into the TLS slot identified by tls_key. AOT uses
 * OP_TLS_SET_REG with the offset loaded via a TLS_OFFSET const; the JIT
 * resolves the offset immediately and uses OP_TLS_SET.
 */
1880 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1884 if (cfg->compile_aot) {
1885 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1886 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1888 ins->sreg2 = c->dreg;
1889 MONO_ADD_INS (cfg->cbb, ins);
1891 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1893 ins->inst_offset = mini_get_tls_offset (tls_key);
1894 MONO_ADD_INS (cfg->cbb, ins);
1901 * Emit IR to push the current LMF onto the LMF stack.
1904 emit_push_lmf (MonoCompile *cfg)
1907 * Emit IR to push the LMF:
1908 * lmf_addr = <lmf_addr from tls>
1909 * lmf->lmf_addr = lmf_addr
1910 * lmf->prev_lmf = *lmf_addr
1913 int lmf_reg, prev_lmf_reg;
1914 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS (TLS_KEY_LMF supported). */
1919 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1920 /* Load current lmf */
1921 lmf_ins = mono_get_lmf_intrinsic (cfg);
1923 MONO_ADD_INS (cfg->cbb, lmf_ins);
1924 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1925 lmf_reg = ins->dreg;
1926 /* Save previous_lmf */
1927 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make this frame's LMF the current one in TLS. */
1929 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1932 * Store lmf_addr in a variable, so it can be allocated to a global register.
1934 if (!cfg->lmf_addr_var)
1935 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr: via jit_tls intrinsic + offset, via a dedicated
 * intrinsic, or by calling the mono_get_lmf_addr icall as a fallback.
 * NOTE(review): the #ifdef structure selecting among these is partly
 * elided in this extract. */
1938 ins = mono_get_jit_tls_intrinsic (cfg);
1940 int jit_tls_dreg = ins->dreg;
1942 MONO_ADD_INS (cfg->cbb, ins);
1943 lmf_reg = alloc_preg (cfg);
1944 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
1946 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1949 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1951 MONO_ADD_INS (cfg->cbb, lmf_ins);
1953 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1955 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1957 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1958 lmf_reg = ins->dreg;
1960 prev_lmf_reg = alloc_preg (cfg);
1961 /* Save previous_lmf */
1962 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1963 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link this frame's LMF at the head of the LMF stack: *lmf_addr = lmf. */
1965 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1972 * Emit IR to pop the current LMF from the LMF stack.
1975 emit_pop_lmf (MonoCompile *cfg)
1977 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1983 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1984 lmf_reg = ins->dreg;
/* Fast path: restore previous_lmf directly into the TLS LMF slot. */
1986 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1987 /* Load previous_lmf */
1988 prev_lmf_reg = alloc_preg (cfg);
1989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1991 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
1994 * Emit IR to pop the LMF:
1995 * *(lmf->lmf_addr) = lmf->prev_lmf
1997 /* This could be called before emit_push_lmf () */
1998 if (!cfg->lmf_addr_var)
1999 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2000 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2002 prev_lmf_reg = alloc_preg (cfg);
2003 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Unlink: *lmf_addr = lmf->previous_lmf. */
2004 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method return TYPE to the call opcode family to use
 * (CALL/VOIDCALL/LCALL/FCALL/VCALL), selecting the _REG variant for
 * indirect calls (calli) and _MEMBASE for virtual calls (virt).
 * Enums and generic insts are unwrapped and re-dispatched.
 */
2009 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized: plain CALL family.
 * NOTE(review): the byref test itself is elided in this extract. */
2012 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2015 type = mini_get_basic_type_from_generic (gsctx, type);
2016 type = mini_replace_type (type);
2017 switch (type->type) {
2018 case MONO_TYPE_VOID:
2019 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2022 case MONO_TYPE_BOOLEAN:
2025 case MONO_TYPE_CHAR:
2028 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2032 case MONO_TYPE_FNPTR:
2033 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2034 case MONO_TYPE_CLASS:
2035 case MONO_TYPE_STRING:
2036 case MONO_TYPE_OBJECT:
2037 case MONO_TYPE_SZARRAY:
2038 case MONO_TYPE_ARRAY:
2039 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2042 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2045 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2046 case MONO_TYPE_VALUETYPE:
/* Enum valuetypes dispatch on their underlying basetype. */
2047 if (type->data.klass->enumtype) {
2048 type = mono_class_enum_basetype (type->data.klass);
2051 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2052 case MONO_TYPE_TYPEDBYREF:
2053 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2054 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2055 type = &type->data.generic_class->container_class->byval_arg;
2058 case MONO_TYPE_MVAR:
/* gsharedvt type variables are treated as vtypes here. */
2060 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2062 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2068 * target_type_is_incompatible:
2069 * @cfg: MonoCompile context
2071 * Check that the item @arg on the evaluation stack can be stored
2072 * in the target type (can be a local, or field, etc).
2073 * The cfg arg can be used to check if we need verification or just
2076 * Returns: non-0 value if arg can't be stored on a target.
2079 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2081 MonoType *simple_type;
2084 target = mini_replace_type (target);
2085 if (target->byref) {
2086 /* FIXME: check that the pointed to types match */
2087 if (arg->type == STACK_MP)
2088 return arg->klass != mono_class_from_mono_type (target);
2089 if (arg->type == STACK_PTR)
/* Dispatch on the underlying (enum-resolved) target type. */
2094 simple_type = mono_type_get_underlying_type (target);
2095 switch (simple_type->type) {
2096 case MONO_TYPE_VOID:
2100 case MONO_TYPE_BOOLEAN:
2103 case MONO_TYPE_CHAR:
/* Small integer targets accept I4 and native-int stack entries. */
2106 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2110 /* STACK_MP is needed when setting pinned locals */
2111 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2116 case MONO_TYPE_FNPTR:
2118 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2119 * in native int. (#688008).
2121 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2124 case MONO_TYPE_CLASS:
2125 case MONO_TYPE_STRING:
2126 case MONO_TYPE_OBJECT:
2127 case MONO_TYPE_SZARRAY:
2128 case MONO_TYPE_ARRAY:
2129 if (arg->type != STACK_OBJ)
2131 /* FIXME: check type compatibility */
2135 if (arg->type != STACK_I8)
2140 if (arg->type != STACK_R8)
/* Valuetype targets additionally require the exact same klass. */
2143 case MONO_TYPE_VALUETYPE:
2144 if (arg->type != STACK_VTYPE)
2146 klass = mono_class_from_mono_type (simple_type);
2147 if (klass != arg->klass)
2150 case MONO_TYPE_TYPEDBYREF:
2151 if (arg->type != STACK_VTYPE)
2153 klass = mono_class_from_mono_type (simple_type);
2154 if (klass != arg->klass)
2157 case MONO_TYPE_GENERICINST:
2158 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2159 if (arg->type != STACK_VTYPE)
2161 klass = mono_class_from_mono_type (simple_type);
2162 if (klass != arg->klass)
2166 if (arg->type != STACK_OBJ)
2168 /* FIXME: check type compatibility */
2172 case MONO_TYPE_MVAR:
/* Type variables only appear here under generic sharing; vt-constrained
 * variables expect a vtype, others an object reference. */
2173 g_assert (cfg->generic_sharing_context);
2174 if (mini_type_var_is_vt (cfg, simple_type)) {
2175 if (arg->type != STACK_VTYPE)
2178 if (arg->type != STACK_OBJ)
2183 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2189 * Prepare arguments for passing to a function call.
2190 * Return a non-zero value if the arguments can't be passed to the given
2192 * The type checks are not yet complete and some conversions may need
2193 * casts on 32 or 64 bit architectures.
2195 * FIXME: implement this using target_type_is_incompatible ()
2198 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2200 MonoType *simple_type;
/* 'this' must be an object reference, managed pointer or native int. */
2204 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2208 for (i = 0; i < sig->param_count; ++i) {
2209 if (sig->params [i]->byref) {
2210 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Resolve shared generic params before dispatching on the type. */
2214 simple_type = sig->params [i];
2215 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2217 switch (simple_type->type) {
2218 case MONO_TYPE_VOID:
2223 case MONO_TYPE_BOOLEAN:
2226 case MONO_TYPE_CHAR:
2229 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2235 case MONO_TYPE_FNPTR:
2236 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2239 case MONO_TYPE_CLASS:
2240 case MONO_TYPE_STRING:
2241 case MONO_TYPE_OBJECT:
2242 case MONO_TYPE_SZARRAY:
2243 case MONO_TYPE_ARRAY:
2244 if (args [i]->type != STACK_OBJ)
2249 if (args [i]->type != STACK_I8)
2254 if (args [i]->type != STACK_R8)
2257 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying basetype. */
2258 if (simple_type->data.klass->enumtype) {
2259 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2262 if (args [i]->type != STACK_VTYPE)
2265 case MONO_TYPE_TYPEDBYREF:
2266 if (args [i]->type != STACK_VTYPE)
2269 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2270 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2273 case MONO_TYPE_MVAR:
2275 if (args [i]->type != STACK_VTYPE)
2279 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALL_MEMBASE (virtual call) opcode to the corresponding direct
 * call opcode of the same family; asserts on anything else.
 */
2287 callvirt_to_call (int opcode)
2290 case OP_CALL_MEMBASE:
2292 case OP_VOIDCALL_MEMBASE:
2294 case OP_FCALL_MEMBASE:
2296 case OP_VCALL_MEMBASE:
2298 case OP_LCALL_MEMBASE:
2301 g_assert_not_reached ();
2307 #ifdef MONO_ARCH_HAVE_IMT
2308 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (either the supplied imt_arg register or
 * a constant for METHOD) and attach it to CALL. LLVM records it in
 * call->imt_arg_reg; otherwise it goes to MONO_ARCH_IMT_REG when the
 * architecture defines one, or to the arch-specific fallback.
 */
2310 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2314 if (COMPILE_LLVM (cfg)) {
2315 method_reg = alloc_preg (cfg);
/* Prefer the caller-provided imt_arg; fall back to a method constant
 * (AOT patch or direct pointer). */
2318 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2319 } else if (cfg->compile_aot) {
2320 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2323 MONO_INST_NEW (cfg, ins, OP_PCONST);
2324 ins->inst_p0 = method;
2325 ins->dreg = method_reg;
2326 MONO_ADD_INS (cfg->cbb, ins);
2330 call->imt_arg_reg = method_reg;
2332 #ifdef MONO_ARCH_IMT_REG
2333 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2335 /* Need this to keep the IMT arg alive */
2336 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same constant materialization, targeting the IMT reg. */
2341 #ifdef MONO_ARCH_IMT_REG
2342 method_reg = alloc_preg (cfg);
2345 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2346 } else if (cfg->compile_aot) {
2347 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2350 MONO_INST_NEW (cfg, ins, OP_PCONST);
2351 ins->inst_p0 = method;
2352 ins->dreg = method_reg;
2353 MONO_ADD_INS (cfg->cbb, ins);
2356 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Arch without an IMT register: delegate to the backend. */
2358 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo patch record from MP, filling in the target.
 * NOTE(review): assignment of ip/type fields is elided in this extract.
 */
2363 static MonoJumpInfo *
2364 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2366 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2370 ji->data.target = target;
/* Return the context-used flags for KLASS, but only under generic sharing. */
2376 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2378 if (cfg->generic_sharing_context)
2379 return mono_class_check_context_used (klass);
/* Return the context-used flags for METHOD, but only under generic sharing. */
2385 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2387 if (cfg->generic_sharing_context)
2388 return mono_method_check_context_used (method);
2394 * check_method_sharing:
2396 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2399 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2401 gboolean pass_vtable = FALSE;
2402 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods on generic classes may need the vtable. */
2404 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2405 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2406 gboolean sharable = FALSE;
2408 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2411 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2412 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2413 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2415 sharable = sharing_enabled && context_sharable;
2419 * Pass vtable iff target method might
2420 * be shared, which means that sharing
2421 * is enabled for its class and its
2422 * context is sharable (and it's not a
2425 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method_inst) need an mrgctx instead. */
2429 if (mini_method_get_context (cmethod) &&
2430 mini_method_get_context (cmethod)->method_inst) {
2431 g_assert (!pass_vtable);
2433 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2436 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2437 MonoGenericContext *context = mini_method_get_context (cmethod);
2438 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2440 if (sharing_enabled && context_sharable)
/* gsharedvt signatures always force the mrgctx. */
2442 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2447 if (out_pass_vtable)
2448 *out_pass_vtable = pass_vtable;
2449 if (out_pass_mrgctx)
2450 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for SIG: pick the call opcode (tail call or via
 * ret_type_to_call_opcode), set up vtype return handling (vret_var /
 * OP_OUTARG_VTRETADDR), apply the soft-float r8->r4 argument fixup, and
 * hand the call to the LLVM or arch backend for outarg emission.
 */
2453 inline static MonoCallInst *
2454 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2455 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2459 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls use a dedicated opcode; everything else derives the opcode
 * from the return type. */
2464 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2466 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2469 call->signature = sig;
2470 call->rgctx_reg = rgctx;
2471 sig_ret = mini_replace_type (sig->ret);
2473 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Vtype return: either reuse cfg->vret_addr, or allocate a temp and
 * reference it through OP_OUTARG_VTRETADDR (see comment below). */
2476 if (mini_type_is_vtype (cfg, sig_ret)) {
2477 call->vret_var = cfg->vret_addr;
2478 //g_assert_not_reached ();
2480 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2481 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2484 temp->backend.is_pinvoke = sig->pinvoke;
2487 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2488 * address of return value to increase optimization opportunities.
2489 * Before vtype decomposition, the dreg of the call ins itself represents the
2490 * fact the call modifies the return value. After decomposition, the call will
2491 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2492 * will be transformed into an LDADDR.
2494 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2495 loada->dreg = alloc_preg (cfg);
2496 loada->inst_p0 = temp;
2497 /* We reference the call too since call->dreg could change during optimization */
2498 loada->inst_p1 = call;
2499 MONO_ADD_INS (cfg->cbb, loada);
2501 call->inst.dreg = temp->dreg;
2503 call->vret_var = loada;
2504 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2505 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2507 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2508 if (COMPILE_SOFT_FLOAT (cfg)) {
2510 * If the call has a float argument, we would need to do an r8->r4 conversion using
2511 * an icall, but that cannot be done during the call sequence since it would clobber
2512 * the call registers + the stack. So we do it before emitting the call.
2514 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2516 MonoInst *in = call->args [i];
2518 if (i >= sig->hasthis)
2519 t = sig->params [i - sig->hasthis];
2521 t = &mono_defaults.int_class->byval_arg;
2522 t = mono_type_get_underlying_type (t);
2524 if (!t->byref && t->type == MONO_TYPE_R4) {
2525 MonoInst *iargs [1];
/* Convert the r8 argument to r4 via an icall ahead of the call. */
2529 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2531 /* The result will be in an int vreg */
2532 call->args [i] = conv;
2538 call->need_unbox_trampoline = unbox_trampoline;
/* Let the active backend lower the arguments. */
2541 if (COMPILE_LLVM (cfg))
2542 mono_llvm_emit_call (cfg, call);
2544 mono_arch_emit_call (cfg, call);
2546 mono_arch_emit_call (cfg, call);
/* Track outgoing-arg stack needs and mark the method as making calls. */
2549 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2550 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Record that CALL receives the runtime-generic-context argument RGCTX_ARG
 * (already moved into RGCTX_REG).  On architectures that define a dedicated
 * MONO_ARCH_RGCTX_REG the value is pinned to that register; otherwise the
 * register number is stored on the call for later lowering.
 */
2556 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2558 #ifdef MONO_ARCH_RGCTX_REG
2559 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2560 cfg->uses_rgctx_reg = TRUE;
2561 call->rgctx_reg = TRUE;
2563 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through the code address ADDR with signature SIG.
 * IMT_ARG and RGCTX_ARG, when non-NULL, are passed as the hidden
 * interface-method and generic-context arguments.  When
 * cfg->check_pinvoke_callconv is set for a pinvoke wrapper, the stack pointer
 * is captured before the call and compared afterwards, throwing
 * ExecutionEngineException (after restoring SP) if the callee unbalanced the
 * stack — this catches cdecl/stdcall calling-convention mismatches.
 */
2570 inline static MonoInst*
2571 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2576 gboolean check_sp = FALSE;
2578 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2579 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2581 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value so it survives until the call is lowered. */
2586 rgctx_reg = mono_alloc_preg (cfg);
2587 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2591 if (!cfg->stack_inbalance_var)
2592 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call for the calling-convention check. */
2594 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2595 ins->dreg = cfg->stack_inbalance_var->dreg;
2596 MONO_ADD_INS (cfg->cbb, ins);
2599 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2601 call->inst.sreg1 = addr->dreg;
2604 emit_imt_argument (cfg, call, NULL, imt_arg);
2606 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Re-read SP after the call and compare with the saved value. */
2611 sp_reg = mono_alloc_preg (cfg);
2613 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2615 MONO_ADD_INS (cfg->cbb, ins);
2617 /* Restore the stack so we don't crash when throwing the exception */
2618 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2619 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2620 MONO_ADD_INS (cfg->cbb, ins);
2622 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2623 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2627 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2629 return (MonoInst*)call;
/* Forward declarations for rgctx/gsharedvt helpers defined later in this file. */
2633 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2636 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2638 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual, possibly tail) call to METHOD with arguments ARGS
 * and 'this' pointer THIS.  Handles, in order: remoting (transparent proxy)
 * dispatch including the generic-sharing variant, string ctors (whose real
 * return type is string), the MulticastDelegate.Invoke fast path through
 * delegate->invoke_impl, devirtualization of non-virtual/final methods, and
 * full vtable- or IMT-based virtual dispatch.  IMT_ARG/RGCTX_ARG are the
 * optional hidden arguments.
 */
2641 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2642 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2644 #ifndef DISABLE_REMOTING
2645 gboolean might_be_remote = FALSE;
2647 gboolean virtual = this != NULL;
2648 gboolean enable_for_aot = TRUE;
2652 gboolean need_unbox_trampoline;
2655 sig = mono_method_signature (method);
2658 rgctx_reg = mono_alloc_preg (cfg);
2659 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2662 if (method->string_ctor) {
2663 /* Create the real signature */
2664 /* FIXME: Cache these */
2665 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2666 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2671 context_used = mini_method_check_context_used (cfg, method);
2673 #ifndef DISABLE_REMOTING
/* 'this' may be a transparent proxy: MarshalByRef classes (and object,
 * since any method can be inherited from it) require a remoting check
 * unless the receiver is statically known to be non-remote. */
2674 might_be_remote = this && sig->hasthis &&
2675 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2676 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2678 if (might_be_remote && context_used) {
2681 g_assert (cfg->generic_sharing_context);
2683 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2685 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Methods on object/interfaces may be called on a boxed valuetype, so
 * the callee address must go through an unbox trampoline. */
2689 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2691 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2693 #ifndef DISABLE_REMOTING
2694 if (might_be_remote)
2695 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2698 call->method = method;
2699 call->inst.flags |= MONO_INST_HAS_METHOD;
2700 call->inst.inst_left = this;
2701 call->tail_call = tail;
2704 int vtable_reg, slot_reg, this_reg;
2707 this_reg = this->dreg;
/* Fast path: delegate Invoke goes through delegate->invoke_impl. */
2709 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2710 MonoInst *dummy_use;
2712 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2714 /* Make a call to delegate->invoke_impl */
2715 call->inst.inst_basereg = this_reg;
2716 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2717 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2719 /* We must emit a dummy use here because the delegate trampoline will
2720 replace the 'this' argument with the delegate target making this activation
2721 no longer a root for the delegate.
2722 This is an issue for delegates that target collectible code such as dynamic
2723 methods of GC'able assemblies.
2725 For a test case look into #667921.
2727 FIXME: a dummy use is not the best way to do it as the local register allocator
2728 will put it on a caller save register and spil it around the call.
2729 Ideally, we would either put it on a callee save register or only do the store part.
2731 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2733 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, and final methods, can be called
 * directly after a null check on 'this'. */
2736 if ((!cfg->compile_aot || enable_for_aot) &&
2737 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2738 (MONO_METHOD_IS_FINAL (method) &&
2739 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2740 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2742 * the method is not virtual, we just need to ensure this is not null
2743 * and then we can call the method directly.
2745 #ifndef DISABLE_REMOTING
2746 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2748 * The check above ensures method is not gshared, this is needed since
2749 * gshared methods can't have wrappers.
2751 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2755 if (!method->string_ctor)
2756 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2758 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2759 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2761 * the method is virtual, but we can statically dispatch since either
2762 * it's class or the method itself are sealed.
2763 * But first we need to ensure it's not a null reference.
2765 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2767 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable, then pick the slot either
 * through the IMT (interfaces) or the plain vtable index. */
2769 vtable_reg = alloc_preg (cfg);
2770 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2771 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2773 #ifdef MONO_ARCH_HAVE_IMT
2775 guint32 imt_slot = mono_method_get_imt_slot (method);
2776 emit_imt_argument (cfg, call, call->method, imt_arg);
2777 slot_reg = vtable_reg;
/* IMT entries live at negative offsets from the vtable pointer. */
2778 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2781 if (slot_reg == -1) {
2782 slot_reg = alloc_preg (cfg);
2783 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2784 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2787 slot_reg = vtable_reg;
2788 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2789 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2790 #ifdef MONO_ARCH_HAVE_IMT
2792 g_assert (mono_method_signature (method)->generic_param_count);
2793 emit_imt_argument (cfg, call, call->method, imt_arg);
2798 call->inst.sreg1 = slot_reg;
2799 call->inst.inst_offset = offset;
2800 call->virtual = TRUE;
2804 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2807 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2809 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 * Convenience wrapper: non-tail call to METHOD using its own signature,
 * with no hidden imt/rgctx arguments.
 */
2813 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2815 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct, non-virtual call to the native function FUNC with signature
 * SIG.  (The remaining parameters continue on a line not visible in this
 * chunk — NOTE(review): confirm against the full source.)
 */
2819 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2826 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2829 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2831 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Emit a call to the JIT icall registered for the address FUNC, routed
 * through the icall's wrapper and using its registered signature.
 */
2835 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2837 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2841 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2845 * mono_emit_abs_call:
2847 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2849 inline static MonoInst*
2850 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2851 MonoMethodSignature *sig, MonoMethodSignature... /* see header comment above: emits a call described by PATCH_TYPE/DATA */
2853 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2857 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in the per-cfg abs_patches table so the patch-info resolver
 * can map the fake address back to the jump-info descriptor. */
2860 if (cfg->abs_patches == NULL)
2861 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2862 g_hash_table_insert (cfg->abs_patches, ji, ji);
2863 ins = mono_emit_native_call (cfg, ji, sig, args);
2864 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 * Sign/zero-extend a sub-register-sized integer return value of a call INS
 * with signature FSIG.  Only needed for pinvoke (or LLVM) calls, since native
 * code may return small integers without initializing the upper bits.
 */
2869 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2871 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2872 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2876 * Native code might return non register sized integers
2877 * without initializing the upper bits.
/* Map the return type to the matching widening conversion, if any. */
2879 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2880 case OP_LOADI1_MEMBASE:
2881 widen_op = OP_ICONV_TO_I1;
2883 case OP_LOADU1_MEMBASE:
2884 widen_op = OP_ICONV_TO_U1;
2886 case OP_LOADI2_MEMBASE:
2887 widen_op = OP_ICONV_TO_I2;
2889 case OP_LOADU2_MEMBASE:
2890 widen_op = OP_ICONV_TO_U2;
2896 if (widen_op != -1) {
2897 int dreg = alloc_preg (cfg);
2900 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2901 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Return (and lazily cache) the managed String.memcpy helper used for
 * value-type copies; aborts if the running corlib does not provide it.
 */
2911 get_memcpy_method (void)
2913 static MonoMethod *memcpy_method = NULL;
2914 if (!memcpy_method) {
2915 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2917 g_error ("Old corlib found. Install a new one");
2919 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * OR into *WB_BITMAP one bit per pointer-sized slot of KLASS (relative to
 * OFFSET) that holds a managed reference, recursing into embedded structs
 * that themselves contain references.  Static fields are skipped.  The bitmap
 * drives which slots of an inlined value copy need GC write barriers.
 */
2923 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2925 MonoClassField *field;
2926 gpointer iter = NULL;
2928 while ((field = mono_class_get_fields (klass, &iter))) {
2931 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Field offsets of valuetypes include the (absent) object header. */
2933 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2934 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2935 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2936 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2938 MonoClass *field_class = mono_class_from_mono_type (field->type);
2939 if (field_class->has_references)
2940 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for the store of VALUE at address PTR.  Prefers,
 * in order: the architecture's inline OP_CARD_TABLE_WBARRIER opcode, a
 * hand-emitted card-table mark (shift/mask/store of the card byte), and
 * finally a call to the GC's generic managed write-barrier method.  No-op
 * when write barriers are disabled for this cfg.
 */
2946 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2948 int card_table_shift_bits;
2949 gpointer card_table_mask;
2951 MonoInst *dummy_use;
2952 int nursery_shift_bits;
2953 size_t nursery_size;
2954 gboolean has_card_table_wb = FALSE;
2956 if (!cfg->gen_write_barriers)
2959 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2961 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2963 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2964 has_card_table_wb = TRUE;
2967 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2970 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2971 wbarrier->sreg1 = ptr->dreg;
2972 wbarrier->sreg2 = value->dreg;
2973 MONO_ADD_INS (cfg->cbb, wbarrier);
2974 } else if (card_table) {
/* Mark the card for PTR by hand: card = table[ptr >> shift (& mask)]. */
2975 int offset_reg = alloc_preg (cfg);
2976 int card_reg = alloc_preg (cfg);
2979 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2980 if (card_table_mask)
2981 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2983 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2984 * IMM's larger than 32bits.
2986 if (cfg->compile_aot) {
2987 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2989 MONO_INST_NEW (cfg, ins, OP_PCONST);
2990 ins->inst_p0 = card_table;
2991 ins->dreg = card_reg;
2992 MONO_ADD_INS (cfg->cbb, ins);
2995 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2996 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write-barrier method. */
2998 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2999 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator/GC. */
3002 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Emit an inline, write-barrier-aware copy of a value of type KLASS of SIZE
 * bytes (IARGS[0] = dest addr, IARGS[1] = src addr).  Bails out (presumably
 * returning FALSE — the early-return lines are not visible in this chunk)
 * when alignment is below pointer size or SIZE exceeds the bitmap capacity.
 * Large copies are delegated to the mono_gc_wbarrier_value_copy_bitmap icall;
 * small ones are unrolled into pointer-sized load/store pairs, emitting a
 * write barrier only for the slots flagged by create_write_barrier_bitmap().
 */
3006 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3008 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3009 unsigned need_wb = 0;
3014 /*types with references can't have alignment smaller than sizeof(void*) */
3015 if (align < SIZEOF_VOID_P)
3018 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3019 if (size > 32 * SIZEOF_VOID_P)
3022 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3024 /* We don't unroll more than 5 stores to avoid code bloat. */
3025 if (size > 5 * SIZEOF_VOID_P) {
3026 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3027 size += (SIZEOF_VOID_P - 1);
3028 size &= ~(SIZEOF_VOID_P - 1);
3030 EMIT_NEW_ICONST (cfg, iargs [2], size);
3031 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3032 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3036 destreg = iargs [0]->dreg;
3037 srcreg = iargs [1]->dreg;
3040 dest_ptr_reg = alloc_preg (cfg);
3041 tmp_reg = alloc_preg (cfg);
3044 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop (emitted at compile time). */
3046 while (size >= SIZEOF_VOID_P) {
3047 MonoInst *load_inst;
3048 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3049 load_inst->dreg = tmp_reg;
3050 load_inst->inst_basereg = srcreg;
3051 load_inst->inst_offset = offset;
3052 MONO_ADD_INS (cfg->cbb, load_inst);
3054 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots marked in the bitmap (guard not visible here). */
3057 emit_write_barrier (cfg, iargs [0], load_inst);
3059 offset += SIZEOF_VOID_P;
3060 size -= SIZEOF_VOID_P;
3063 /*tmp += sizeof (void*)*/
3064 if (size >= SIZEOF_VOID_P) {
3065 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3066 MONO_ADD_INS (cfg->cbb, iargs [0]);
3070 /* Those cannot be references since size < sizeof (void*) */
3072 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3073 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3079 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3080 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3086 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3087 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3096 * Emit code to copy a valuetype of type @klass whose address is stored in
3097 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 * Copy a valuetype of type KLASS from *SRC->dreg to *DEST->dreg.  NATIVE
 * selects the native (marshalled) layout, in which case no references may be
 * present.  For gsharedvt classes the size and memcpy routine come from the
 * rgctx; stores with references use the write-barrier-aware paths unless the
 * destination is known to be on the stack.
 */
3100 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3102 MonoInst *iargs [4];
3103 int context_used, n;
3105 MonoMethod *memcpy_method;
3106 MonoInst *size_ins = NULL;
3107 MonoInst *memcpy_ins = NULL;
3111 * This check breaks with spilled vars... need to handle it during verification anyway.
3112 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size/memcpy are only known at runtime, fetch from the rgctx. */
3115 if (mini_is_gsharedvt_klass (cfg, klass)) {
3117 context_used = mini_class_check_context_used (cfg, klass);
3118 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3119 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3123 n = mono_class_native_size (klass, &align);
3125 n = mono_class_value_size (klass, &align);
3127 /* if native is true there should be no references in the struct */
3128 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3129 /* Avoid barriers when storing to the stack */
3130 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3131 (dest->opcode == OP_LDADDR))) {
3137 context_used = mini_class_check_context_used (cfg, klass);
3139 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3140 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3142 } else if (context_used) {
3143 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3145 if (cfg->compile_aot) {
3146 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3148 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3149 mono_class_compute_gc_descriptor (klass);
3154 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3156 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references involved: a plain memcpy is safe. */
3161 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3162 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3163 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3168 iargs [2] = size_ins;
3170 EMIT_NEW_ICONST (cfg, iargs [2], n);
3172 memcpy_method = get_memcpy_method ();
3174 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3176 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return (and lazily cache) the managed String.memset helper used to
 * zero-initialize valuetypes; aborts if the corlib does not provide it.
 */
3181 get_memset_method (void)
3183 static MonoMethod *memset_method = NULL;
3184 if (!memset_method) {
3185 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3187 g_error ("Old corlib found. Install a new one");
3189 return memset_method;
/*
 * mini_emit_initobj:
 * Zero-initialize a valuetype of type KLASS at the address in DEST.  For
 * gsharedvt classes the size and bzero routine are fetched from the rgctx at
 * runtime; small fixed-size types get an inline memset, larger ones a call to
 * the managed memset helper.
 */
3193 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3195 MonoInst *iargs [3];
3196 int n, context_used;
3198 MonoMethod *memset_method;
3199 MonoInst *size_ins = NULL;
3200 MonoInst *bzero_ins = NULL;
3201 static MonoMethod *bzero_method;
3203 /* FIXME: Optimize this for the case when dest is an LDADDR */
3205 mono_class_init (klass);
/* gsharedvt: the concrete size is runtime data, use the rgctx bzero. */
3206 if (mini_is_gsharedvt_klass (cfg, klass)) {
3207 context_used = mini_class_check_context_used (cfg, klass);
3208 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3209 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3211 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3212 g_assert (bzero_method);
3214 iargs [1] = size_ins;
3215 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3219 n = mono_class_value_size (klass, &align);
/* Small types: inline the memset instead of calling out. */
3221 if (n <= sizeof (gpointer) * 5) {
3222 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3225 memset_method = get_memset_method ();
3227 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3228 EMIT_NEW_ICONST (cfg, iargs [2], n);
3229 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR that loads the runtime generic context for METHOD, depending on
 * CONTEXT_USED: the method runtime generic context (MRGCTX) variable for
 * shared generic methods, the vtable variable for static/valuetype methods
 * (dereferencing the MRGCTX when needed), or the vtable loaded from the
 * 'this' argument otherwise.
 */
3234 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3236 MonoInst *this = NULL;
3238 g_assert (cfg->generic_sharing_context);
3240 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3241 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3242 !method->klass->valuetype)
3243 EMIT_NEW_ARGLOAD (cfg, this, 0);
3245 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3246 MonoInst *mrgctx_loc, *mrgctx_var;
3249 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3251 mrgctx_loc = mono_get_vtable_var (cfg);
3252 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3255 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3256 MonoInst *vtable_loc, *vtable_var;
3260 vtable_loc = mono_get_vtable_var (cfg);
3261 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The var actually holds an MRGCTX here; load its class_vtable field. */
3263 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3264 MonoInst *mrgctx_var = vtable_var;
3267 vtable_reg = alloc_preg (cfg);
3268 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3269 vtable_var->type = STACK_PTR;
/* Default case: read the vtable out of the 'this' object. */
3277 vtable_reg = alloc_preg (cfg);
3278 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate, from mempool MP, an rgctx-entry descriptor for a lazily fetched
 * slot: the slot's contents are described by PATCH_TYPE/PATCH_DATA and the
 * requested info kind INFO_TYPE; IN_MRGCTX selects method vs. class context.
 */
3283 static MonoJumpInfoRgctxEntry *
3284 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3286 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3287 res->method = method;
3288 res->in_mrgctx = in_mrgctx;
3289 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3290 res->data->type = patch_type;
3291 res->data->data.target = patch_data;
3292 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 * Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY,
 * passing the runtime generic context RGCTX as the single argument.
 */
3297 static inline MonoInst*
3298 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3300 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 * Emit IR that loads the RGCTX_TYPE information for KLASS from the runtime
 * generic context of the current method.
 */
3304 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3305 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3307 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3308 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3310 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 * Emit IR that loads the RGCTX_TYPE information for the signature SIG from
 * the runtime generic context of the current method.
 */
3314 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3315 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3317 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3318 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3320 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 * Emit IR that loads, from the rgctx, the RGCTX_TYPE information for a
 * gsharedvt call described by (SIG, CMETHOD).
 */
3324 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3325 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3327 MonoJumpInfoGSharedVtCall *call_info;
3328 MonoJumpInfoRgctxEntry *entry;
3331 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3332 call_info->sig = sig;
3333 call_info->method = cmethod;
3335 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3336 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3338 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 * Emit IR that loads the gsharedvt per-method INFO structure for CMETHOD
 * from the runtime generic context.
 */
3343 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3344 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3346 MonoJumpInfoRgctxEntry *entry;
3349 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3350 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3352 return emit_rgctx_fetch (cfg, rgctx, entry);
3356 * emit_get_rgctx_method:
3358 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3359 * normal constants, else emit a load from the rgctx.
3362 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3363 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared code: the method is statically known, emit a constant. */
3365 if (!context_used) {
3368 switch (rgctx_type) {
3369 case MONO_RGCTX_INFO_METHOD:
3370 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3372 case MONO_RGCTX_INFO_METHOD_RGCTX:
3373 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3376 g_assert_not_reached ();
/* Shared code: go through the lazy rgctx fetch (see header comment above). */
3379 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3380 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3382 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 * Emit IR that loads the RGCTX_TYPE information for FIELD from the runtime
 * generic context of the current method.
 */
3387 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3388 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3390 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3391 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3393 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 * Return the index of the entry matching (DATA, RGCTX_TYPE) in the per-method
 * gsharedvt info template table, appending a new entry (growing the array as
 * needed) when none exists.  LOCAL_OFFSET entries are never deduplicated.
 */
3397 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3399 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3400 MonoRuntimeGenericContextInfoTemplate *template;
3405 for (i = 0; i < info->num_entries; ++i) {
3406 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3408 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Table is full: double its capacity (starting at 16). */
3412 if (info->num_entries == info->count_entries) {
3413 MonoRuntimeGenericContextInfoTemplate *new_entries;
3414 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3416 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3418 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3419 info->entries = new_entries;
3420 info->count_entries = new_count_entries;
3423 idx = info->num_entries;
3424 template = &info->entries [idx];
3425 template->info_type = rgctx_type;
3426 template->data = data;
3428 info->num_entries ++;
3434 * emit_get_gsharedvt_info:
3436 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3439 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or find) a slot for (DATA, RGCTX_TYPE) in the info template. */
3444 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3445 /* Load info->entries [idx] */
/* The gsharedvt info var points to the per-method runtime info filled in
 * by the runtime; the slot contents live in its 'entries' array. */
3446 dreg = alloc_preg (cfg);
3447 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type. */
3453 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3455 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3459 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 * Emit a call to the generic-class-init trampoline for KLASS.  Under generic
 * sharing the vtable argument is fetched from the rgctx; otherwise a vtable
 * constant is emitted.  (Per the comment above this function, callers must
 * check KLASS for load errors on return.)
 */
3462 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3464 MonoInst *vtable_arg;
3468 context_used = mini_class_check_context_used (cfg, klass);
3471 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3472 klass, MONO_RGCTX_INFO_VTABLE);
3474 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3478 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3481 if (COMPILE_LLVM (cfg))
3482 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3484 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3485 #ifdef MONO_ARCH_VTABLE_REG
/* The trampoline expects the vtable in the dedicated arch register. */
3486 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3487 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 * Emit a debugger sequence-point instruction for the IL offset IP when
 * sequence points are enabled and METHOD is the method being compiled
 * (i.e. not an inlined callee).  INTR_LOC marks an interruptible location;
 * NONEMPTY_STACK flags that the IL evaluation stack is not empty here.
 */
3494 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3498 if (cfg->gen_seq_points && cfg->method == method) {
3499 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3501 ins->flags |= MONO_INST_NONEMPTY_STACK;
3502 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 * Under --debug=casts, record in the JIT TLS the runtime class of the object
 * in OBJ_REG and the target class KLASS, so a subsequent failed cast can
 * produce a detailed error message.  When NULL_CHECK is requested (per the
 * visible branch structure), null objects skip the recording.  OUT_BBLOCK,
 * if non-NULL, receives the current basic block on exit.
 */
3507 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3509 if (mini_get_debug_options ()->better_cast_details) {
3510 int to_klass_reg = alloc_preg (cfg);
3511 int vtable_reg = alloc_preg (cfg);
3512 int klass_reg = alloc_preg (cfg);
3513 MonoBasicBlock *is_null_bb = NULL;
3517 NEW_BBLOCK (cfg, is_null_bb);
3519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3523 tls_get = mono_get_jit_tls_intrinsic (cfg);
3525 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3529 MONO_ADD_INS (cfg->cbb, tls_get);
/* object -> vtable -> klass, stored into jit_tls->class_cast_from/to. */
3530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3533 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3534 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3535 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3538 MONO_START_BB (cfg, is_null_bb);
3540 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 * Companion to save_cast_details(): once the cast has succeeded, clear the
 * recorded source class in the JIT TLS so stale data cannot leak into a
 * later cast failure's error message.
 */
3546 reset_cast_details (MonoCompile *cfg)
3548 /* Reset the variables holding the cast details */
3549 if (mini_get_debug_options ()->better_cast_details) {
3550 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3552 MONO_ADD_INS (cfg->cbb, tls_get);
3553 /* It is enough to reset the from field */
3554 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3559 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 * Emit a runtime check that OBJ's actual type matches ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise.  Compares classes under
 * MONO_OPT_SHARED, and vtables otherwise, with separate paths for AOT
 * (class/vtable constants via patches) and generic sharing (vtable fetched
 * from the rgctx).  (Per the comment above: callers must check ARRAY_CLASS
 * for load errors on return.)
 */
3562 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3564 int vtable_reg = alloc_preg (cfg);
3567 context_used = mini_class_check_context_used (cfg, array_class);
3569 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on OBJ. */
3571 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3573 if (cfg->opt & MONO_OPT_SHARED) {
3574 int class_reg = alloc_preg (cfg);
3575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3576 if (cfg->compile_aot) {
3577 int klass_reg = alloc_preg (cfg);
3578 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3579 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3583 } else if (context_used) {
3584 MonoInst *vtable_ins;
3586 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3587 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3589 if (cfg->compile_aot) {
3593 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3595 vt_reg = alloc_preg (cfg);
3596 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3600 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3606 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3608 reset_cast_details (cfg);
3612  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3613  * generic code is generated.
 *
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () for @val.  In generic-shared code
 * (context_used != 0) the method address is fetched from the RGCTX and
 * invoked via calli; otherwise a direct managed call is emitted, passing
 * the vtable as the extra argument when method sharing requires it.
 */
3616 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3618 	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3621 		MonoInst *rgctx, *addr;
3623 		/* FIXME: What if the class is shared? We might not
3624 		   have to get the address of the method from the
		/* RGCTX lookup yields the compiled-code address for the (shared) Unbox method. */
3626 		addr = emit_get_rgctx_method (cfg, context_used, method,
3627 									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3629 		rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3631 		return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3633 		gboolean pass_vtable, pass_mrgctx;
3634 		MonoInst *rgctx_arg = NULL;
3636 		check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3637 		g_assert (!pass_mrgctx);
		/* Elided: presumably guarded by pass_vtable — confirm in the full file. */
3640 			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3643 			EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3646 		return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for the CIL `unbox` of the object on top of the eval stack
 * (sp [0]) to value type @klass: check the runtime type (rank must be 0 and
 * the element class must match, otherwise InvalidCastException), then
 * produce a managed pointer just past the MonoObject header.
 *
 * NOTE(review): elided chunk — the if/else structure between the shared and
 * non-shared paths is partly hidden; confirm against the full file.
 */
3651 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3655 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3656 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3657 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3658 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
3660 	obj_reg = sp [0]->dreg;
	/* Faulting load doubles as the null check. */
3661 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3662 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3664 	/* FIXME: generics */
3665 	g_assert (klass->rank == 0);
	/* A boxed value type always has rank 0; anything else cannot unbox to klass. */
3668 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3669 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3671 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3672 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3675 		MonoInst *element_class;
3677 		/* This assertion is from the unboxcast insn */
3678 		g_assert (klass->rank == 0);
		/* Shared generic path: compare against the RGCTX-resolved element class. */
3680 		element_class = emit_get_rgctx_klass (cfg, context_used,
3681 				klass->element_class, MONO_RGCTX_INFO_KLASS);
3683 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3684 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3686 		save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3687 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3688 		reset_cast_details (cfg);
	/* Result: pointer to the unboxed payload, immediately after the object header. */
3691 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3692 	MONO_ADD_INS (cfg->cbb, add);
3693 	add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit gsharedvt unboxing of @obj to @klass, whose concrete layout/boxing
 * kind is unknown at JIT time.  At run time MONO_RGCTX_INFO_CLASS_BOX_TYPE
 * selects one of three paths (compared against the constants 1 and 2 below):
 * plain vtype (address = obj + header), reference type (ref spilled to a
 * temporary and its address taken), or Nullable<T> (indirect call to the
 * class's Unbox).  All paths produce an address in addr_reg from which the
 * final value is loaded; *out_cbb receives the current bblock.
 *
 * NOTE(review): elided chunk — some original lines are missing from this view.
 */
3700 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3702 	MonoInst *addr, *klass_inst, *is_ref, *args[16];
3703 	MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3707 	klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3713 	args [1] = klass_inst;
	/* Runtime type check (castclass for unbox); throws on mismatch. */
3716 	obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3718 	NEW_BBLOCK (cfg, is_ref_bb);
3719 	NEW_BBLOCK (cfg, is_nullable_bb);
3720 	NEW_BBLOCK (cfg, end_bb);
3721 	is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3722 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3723 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3725 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3726 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3728 	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3729 	addr_reg = alloc_dreg (cfg, STACK_MP);
	/* Fall-through path: ordinary vtype — payload sits right after the object header. */
3733 	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3734 	MONO_ADD_INS (cfg->cbb, addr);
3736 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3739 	MONO_START_BB (cfg, is_ref_bb);
3741 	/* Save the ref to a temporary */
3742 	dreg = alloc_ireg (cfg);
3743 	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3744 	addr->dreg = addr_reg;
3745 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3746 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3749 	MONO_START_BB (cfg, is_nullable_bb);
3752 		MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3753 		MonoInst *unbox_call;
3754 		MonoMethodSignature *unbox_sig;
3757 		var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
		/* Nullable<T>.Unbox cannot be constructed as a MonoMethod at JIT time
		 * in gsharedvt, so build its signature by hand and call indirectly. */
3759 		unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3760 		unbox_sig->ret = &klass->byval_arg;
3761 		unbox_sig->param_count = 1;
3762 		unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3763 		unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3765 		EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3766 		addr->dreg = addr_reg;
3769 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3772 	MONO_START_BB (cfg, end_bb);
	/* Load the unboxed value from whichever address the taken path produced. */
3775 	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3777 	*out_cbb = cfg->cbb;
3783  * Returns NULL and set the cfg exception on error.
 *
 * handle_alloc:
 *
 *   Emit the object-allocation IR for @klass (@for_box distinguishes boxing
 * allocations so the GC can pick a specialized managed allocator).  The
 * strategy depends on compilation mode: generic-shared code resolves the
 * class/vtable through the RGCTX; MONO_OPT_SHARED calls mono_object_new with
 * an explicit domain; AOT out-of-line corlib allocations use a token-based
 * helper to avoid relocations; otherwise a managed allocator or the
 * class-specific allocation function is used.
 *
 * NOTE(review): elided chunk — branch structure between the paths is partly
 * hidden here; confirm against the full file.
 */
3786 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3788 	MonoInst *iargs [2];
3794 		MonoInst *iargs [2];
3796 		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3798 		if (cfg->opt & MONO_OPT_SHARED)
3799 			rgctx_info = MONO_RGCTX_INFO_KLASS;
3801 			rgctx_info = MONO_RGCTX_INFO_VTABLE;
3802 		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3804 		if (cfg->opt & MONO_OPT_SHARED) {
3805 			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3807 			alloc_ftn = mono_object_new;
3810 			alloc_ftn = mono_object_new_specific;
3813 		if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3814 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3816 		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3819 	if (cfg->opt & MONO_OPT_SHARED) {
3820 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3821 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3823 		alloc_ftn = mono_object_new;
3824 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3825 		/* This happens often in argument checking code, eg. throw new FooException... */
3826 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3827 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3828 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3830 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3831 		MonoMethod *managed_alloc = NULL;
		/* vtable creation failed: record a TypeLoad failure on the cfg and bail (return path elided). */
3835 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3836 			cfg->exception_ptr = klass;
3840 #ifndef MONO_CROSS_COMPILE
3841 		managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3844 		if (managed_alloc) {
3845 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3846 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3848 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
		/* pass_lw path (elided guard): the allocator wants the instance size in
		 * pointer-sized words as its first argument. */
3850 			guint32 lw = vtable->klass->instance_size;
3851 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3852 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
3853 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3856 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3860 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3864  * Returns NULL and set the cfg exception on error.
 *
 * handle_box:
 *
 *   Emit IR that boxes @val of type @klass and return the resulting object
 * reference.  Three cases: Nullable<T> goes through Nullable<T>.Box ()
 * (via RGCTX calli in shared code); gsharedvt classes branch at run time on
 * MONO_RGCTX_INFO_CLASS_BOX_TYPE between vtype / reference / nullable
 * handling; ordinary value types allocate and store the value after the
 * object header.  *out_cbb is updated to the current bblock.
 *
 * NOTE(review): elided chunk — some lines (returns, braces) are missing
 * from this view.
 */
3867 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3869 	MonoInst *alloc, *ins;
3871 	*out_cbb = cfg->cbb;
3873 	if (mono_class_is_nullable (klass)) {
3874 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3877 			/* FIXME: What if the class is shared? We might not
3878 			   have to get the method address from the RGCTX. */
3879 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3880 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3881 			MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3883 			return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3885 			gboolean pass_vtable, pass_mrgctx;
3886 			MonoInst *rgctx_arg = NULL;
3888 			check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3889 			g_assert (!pass_mrgctx);
			/* Elided: presumably guarded by pass_vtable — confirm in the full file. */
3892 				MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3895 				EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3898 			return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3902 	if (mini_is_gsharedvt_klass (cfg, klass)) {
3903 		MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3904 		MonoInst *res, *is_ref, *src_var, *addr;
3907 		dreg = alloc_ireg (cfg);
3909 		NEW_BBLOCK (cfg, is_ref_bb);
3910 		NEW_BBLOCK (cfg, is_nullable_bb);
3911 		NEW_BBLOCK (cfg, end_bb);
		/* Runtime dispatch on the boxing kind (1 = ref, 2 = nullable, else vtype). */
3912 		is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3913 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3914 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3916 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3917 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
		/* Fall-through: plain vtype — allocate and copy the value into the box. */
3920 		alloc = handle_alloc (cfg, klass, TRUE, context_used);
3923 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3924 		ins->opcode = OP_STOREV_MEMBASE;
3926 		EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3927 		res->type = STACK_OBJ;
3929 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3932 		MONO_START_BB (cfg, is_ref_bb);
3933 		addr_reg = alloc_ireg (cfg);
3935 		/* val is a vtype, so has to load the value manually */
3936 		src_var = get_vreg_to_inst (cfg, val->dreg);
3938 			src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3939 		EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3940 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3941 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3944 		MONO_START_BB (cfg, is_nullable_bb);
3947 			MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3948 															MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3950 			MonoMethodSignature *box_sig;
3953 			 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3954 			 * construct that method at JIT time, so have to do things by hand.
3956 			box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3957 			box_sig->ret = &mono_defaults.object_class->byval_arg;
3958 			box_sig->param_count = 1;
3959 			box_sig->params [0] = &klass->byval_arg;
3960 			box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3961 			EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3962 			res->type = STACK_OBJ;
3966 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3968 		MONO_START_BB (cfg, end_bb);
3970 		*out_cbb = cfg->cbb;
	/* Non-gsharedvt default: allocate and store the value after the header. */
3974 		alloc = handle_alloc (cfg, klass, TRUE, context_used);
3978 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether @klass has at least one variant (co- or contra-variant)
 * generic type argument instantiated with a reference type.  Such classes
 * need the slow-path cast helpers, since variance makes the simple vtable
 * checks insufficient.
 *
 * NOTE(review): elided chunk — early-return and loop-exit lines are missing
 * from this view.
 */
3985 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3988 	MonoGenericContainer *container;
3989 	MonoGenericInst *ginst;
3991 	if (klass->generic_class) {
3992 		container = klass->generic_class->container_class->generic_container;
3993 		ginst = klass->generic_class->context.class_inst;
3994 	} else if (klass->generic_container && context_used) {
3995 		container = klass->generic_container;
3996 		ginst = container->context.class_inst;
4001 	for (i = 0; i < container->type_argc; ++i) {
		/* Skip invariant parameters; only VARIANT/COVARIANT ones matter here. */
4003 		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4005 		type = ginst->type_argv [i];
4006 		if (mini_type_is_reference (cfg, type))
/* is_complex_isinst:
 *   True when @klass needs the slow cache-based isinst/castclass helper
 * (interfaces, arrays, nullables, MarshalByRef, sealed classes, type vars).
 * The leading TRUE || makes it unconditionally true for now — the fast
 * paths are disabled pending the FIXME below. */
4012 // FIXME: This doesn't work yet (class libs tests fail?)
4013 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the cache-backed castclass wrapper for @klass, with the
 * cast-details diagnostics saved around the call.  @args are the wrapper's
 * arguments (obj, klass, cache slot) prepared by the caller; returns the
 * call's result instruction.
 */
4016 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4018 	MonoMethod *mono_castclass;
4021 	mono_castclass = mono_marshal_get_castclass_with_cache ();
4023 	save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4024 	res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4025 	reset_cast_details (cfg);
4031  * Returns NULL and set the cfg exception on error.
 *
 * handle_castclass:
 *
 *   Emit IR for the CIL `castclass` of @src to @klass: null passes through,
 * complex classes (per is_complex_isinst) go through the cache-based helper,
 * interfaces use the interface-cast check, sealed classes can compare the
 * vtable/class directly, and the general case falls back to
 * mini_emit_castclass_inst.  Throws InvalidCastException on mismatch.
 *
 * NOTE(review): elided chunk — several lines (args[0] setup, braces) are
 * missing from this view.
 */
4034 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4036 	MonoBasicBlock *is_null_bb;
4037 	int obj_reg = src->dreg;
4038 	int vtable_reg = alloc_preg (cfg);
4039 	MonoInst *klass_inst = NULL;
4044 		if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4045 			MonoInst *cache_ins;
4047 			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4052 			/* klass - it's the second element of the cache entry*/
4053 			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4056 			args [2] = cache_ins;
4058 			return emit_castclass_with_cache (cfg, klass, args, NULL);
4061 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4064 	NEW_BBLOCK (cfg, is_null_bb);
	/* castclass on null succeeds and yields null — skip all checks. */
4066 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4067 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4069 	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4071 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4072 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4073 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4075 		int klass_reg = alloc_preg (cfg);
4077 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4079 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4080 			/* the remoting code is broken, access the class for now */
4081 			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4082 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
				/* vtable creation failed: record TypeLoad on the cfg (return path elided). */
4084 					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4085 					cfg->exception_ptr = klass;
4088 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4090 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4091 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4093 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4095 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4096 			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4100 	MONO_START_BB (cfg, is_null_bb);
4102 	reset_cast_details (cfg);
4108  * Returns NULL and set the cfg exception on error.
 *
 * handle_isinst:
 *
 *   Emit IR for the CIL `isinst` of @src against @klass.  Result (res_reg) is
 * the original reference on success or null on failure — never throws.
 * Complex classes use the cache-based managed helper; otherwise the type is
 * checked inline, with special cases for interfaces, arrays (rank + element
 * class, including the enum/object aliasing rules), nullables, and sealed
 * classes (direct vtable compare).
 *
 * NOTE(review): elided chunk — braces, args[0] setup and some else arms are
 * missing from this view.
 */
4111 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4114 	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4115 	int obj_reg = src->dreg;
4116 	int vtable_reg = alloc_preg (cfg);
4117 	int res_reg = alloc_ireg_ref (cfg);
4118 	MonoInst *klass_inst = NULL;
4123 		if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4124 			MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4125 			MonoInst *cache_ins;
4127 			cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4132 			/* klass - it's the second element of the cache entry*/
4133 			EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4136 			args [2] = cache_ins;
4138 			return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4141 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4144 	NEW_BBLOCK (cfg, is_null_bb);
4145 	NEW_BBLOCK (cfg, false_bb);
4146 	NEW_BBLOCK (cfg, end_bb);
4148 	/* Do the assignment at the beginning, so the other assignment can be if converted */
4149 	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4150 	ins->type = STACK_OBJ;
	/* isinst on null yields null: the success target (is_null_bb) keeps res_reg == obj_reg. */
4153 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4154 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4156 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4158 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4159 		g_assert (!context_used);
4160 		/* the is_null_bb target simply copies the input register to the output */
4161 		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4163 		int klass_reg = alloc_preg (cfg);
		/* Array case (elided guard, presumably klass->rank): check rank first, then element class. */
4166 			int rank_reg = alloc_preg (cfg);
4167 			int eclass_reg = alloc_preg (cfg);
4169 			g_assert (!context_used);
4170 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4171 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4172 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4173 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4174 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
4175 			if (klass->cast_class == mono_defaults.object_class) {
				/* object[] accepts reference element types and the enum base-class aliases. */
4176 				int parent_reg = alloc_preg (cfg);
4177 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4178 				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4179 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4180 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4181 			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
4182 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4183 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4184 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4185 			} else if (klass->cast_class == mono_defaults.enum_class) {
4186 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4187 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4188 			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4189 				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4191 				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4192 					/* Check that the object is a vector too */
4193 					int bounds_reg = alloc_preg (cfg);
4194 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4195 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4196 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4199 				/* the is_null_bb target simply copies the input register to the output */
4200 				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4202 		} else if (mono_class_is_nullable (klass)) {
4203 			g_assert (!context_used);
4204 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4205 			/* the is_null_bb target simply copies the input register to the output */
4206 			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4208 			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4209 				g_assert (!context_used);
4210 				/* the remoting code is broken, access the class for now */
4211 				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4212 					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
					/* vtable creation failed: record TypeLoad on the cfg (return path elided). */
4214 						mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4215 						cfg->exception_ptr = klass;
4218 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4220 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4221 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4223 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4224 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4226 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4227 				/* the is_null_bb target simply copies the input register to the output */
4228 				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4233 	MONO_START_BB (cfg, false_bb);
	/* Failure path: result is null. */
4235 	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4236 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4238 	MONO_START_BB (cfg, is_null_bb);
4240 	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the OP_CISINST-style check of @src against @klass, returning
 * an I4 tri-state (see the comment below: 0 = instance, 1 = not an instance,
 * 2 = undecidable transparent proxy).  With remoting enabled, transparent
 * proxies without custom_type_info route to the '2' result; with
 * DISABLE_REMOTING those paths are compiled out.
 *
 * NOTE(review): elided chunk — some #else/#endif and brace lines are missing
 * from this view.
 */
4246 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4248 	/* This opcode takes as input an object reference and a class, and returns:
4249 	0) if the object is an instance of the class,
4250 	1) if the object is not instance of the class,
4251 	2) if the object is a proxy whose type cannot be determined */
4254 #ifndef DISABLE_REMOTING
4255 	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4257 	MonoBasicBlock *true_bb, *false_bb, *end_bb;
4259 	int obj_reg = src->dreg;
4260 	int dreg = alloc_ireg (cfg);
4262 #ifndef DISABLE_REMOTING
4263 	int klass_reg = alloc_preg (cfg);
4266 	NEW_BBLOCK (cfg, true_bb);
4267 	NEW_BBLOCK (cfg, false_bb);
4268 	NEW_BBLOCK (cfg, end_bb);
4269 #ifndef DISABLE_REMOTING
4270 	NEW_BBLOCK (cfg, false2_bb);
4271 	NEW_BBLOCK (cfg, no_proxy_bb);
	/* null is "not an instance" (result 1). */
4274 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4275 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4277 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4278 #ifndef DISABLE_REMOTING
4279 		NEW_BBLOCK (cfg, interface_fail_bb);
4282 		tmp_reg = alloc_preg (cfg);
4283 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4284 #ifndef DISABLE_REMOTING
		/* Interface check failed: could still be a transparent proxy. */
4285 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4286 		MONO_START_BB (cfg, interface_fail_bb);
4287 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4289 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4291 		tmp_reg = alloc_preg (cfg);
4292 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4293 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		/* Proxy with no custom type info: type is undecidable -> result 2. */
4294 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4296 		mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4299 #ifndef DISABLE_REMOTING
4300 		tmp_reg = alloc_preg (cfg);
4301 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4302 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4304 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4305 		tmp_reg = alloc_preg (cfg);
4306 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4307 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4309 		tmp_reg = alloc_preg (cfg);
4310 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4311 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4312 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4314 		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4315 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4317 		MONO_START_BB (cfg, no_proxy_bb);
4319 		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4321 		g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4325 	MONO_START_BB (cfg, false_bb);
4327 	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4328 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4330 #ifndef DISABLE_REMOTING
4331 	MONO_START_BB (cfg, false2_bb);
4333 	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4334 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4337 	MONO_START_BB (cfg, true_bb);
4339 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4341 	MONO_START_BB (cfg, end_bb);
	/* Materialize the tri-state result as a stack I4. */
4344 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4346 	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the OP_CCASTCLASS-style cast of @src to @klass.  Per the
 * comment below: returns 0 when the cast succeeds, 1 when the object is a
 * transparent proxy whose type cannot be determined, and throws
 * InvalidCastException otherwise.  With DISABLE_REMOTING the proxy paths
 * are compiled out.
 *
 * NOTE(review): elided chunk — some #else/#endif and brace lines are missing
 * from this view.
 */
4352 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4354 	/* This opcode takes as input an object reference and a class, and returns:
4355 	0) if the object is an instance of the class,
4356 	1) if the object is a proxy whose type cannot be determined
4357 	an InvalidCastException exception is thrown otherwhise*/
4360 #ifndef DISABLE_REMOTING
4361 	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4363 	MonoBasicBlock *ok_result_bb;
4365 	int obj_reg = src->dreg;
4366 	int dreg = alloc_ireg (cfg);
4367 	int tmp_reg = alloc_preg (cfg);
4369 #ifndef DISABLE_REMOTING
4370 	int klass_reg = alloc_preg (cfg);
4371 	NEW_BBLOCK (cfg, end_bb);
4374 	NEW_BBLOCK (cfg, ok_result_bb);
	/* Casting null always succeeds (result 0). */
4376 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4377 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4379 	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4381 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4382 #ifndef DISABLE_REMOTING
4383 		NEW_BBLOCK (cfg, interface_fail_bb);
4385 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4386 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4387 		MONO_START_BB (cfg, interface_fail_bb);
4388 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		/* Only transparent proxies survive the interface-check failure; others throw. */
4390 		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4392 		tmp_reg = alloc_preg (cfg);
4393 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4394 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4395 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
		/* Proxy with custom type info but undecidable type -> result 1. */
4397 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4398 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4400 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4401 		mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4402 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4405 #ifndef DISABLE_REMOTING
4406 		NEW_BBLOCK (cfg, no_proxy_bb);
4408 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4409 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4410 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4412 		tmp_reg = alloc_preg (cfg);
4413 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4414 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4416 		tmp_reg = alloc_preg (cfg);
4417 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4418 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4419 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4421 		NEW_BBLOCK (cfg, fail_1_bb);
4423 		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4425 		MONO_START_BB (cfg, fail_1_bb);
4427 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4428 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4430 		MONO_START_BB (cfg, no_proxy_bb);
		/* Ordinary object: normal castclass semantics (throws on mismatch). */
4432 		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4434 		g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4438 	MONO_START_BB (cfg, ok_result_bb);
4440 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4442 #ifndef DISABLE_REMOTING
4443 	MONO_START_BB (cfg, end_bb);
	/* Materialize the result as a stack I4. */
4447 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4449 	ins->type = STACK_I4;
4455  * Returns NULL and set the cfg exception on error.
 *
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate a delegate of @klass,
 * store its target/method fields (with write barriers when enabled),
 * optionally set up a per-domain code slot so the delegate trampoline can
 * find the compiled code without a hash lookup, and store the invoke_impl
 * trampoline (AOT patch or eagerly-created trampoline).
 */
4457 static G_GNUC_UNUSED MonoInst*
4458 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4462 	gpointer *trampoline;
4463 	MonoInst *obj, *method_ins, *tramp_ins;
4467 	obj = handle_alloc (cfg, klass, FALSE, 0);
4471 	/* Inline the contents of mono_delegate_ctor */
4473 	/* Set target field */
4474 	/* Optimize away setting of NULL target */
4475 	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4476 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4477 		if (cfg->gen_write_barriers) {
4478 			dreg = alloc_preg (cfg);
4479 			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4480 			emit_write_barrier (cfg, ptr, target);
4484 	/* Set method field */
4485 	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4486 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4487 	if (cfg->gen_write_barriers) {
4488 		dreg = alloc_preg (cfg);
4489 		EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4490 		emit_write_barrier (cfg, ptr, method_ins);
4493 	 * To avoid looking up the compiled code belonging to the target method
4494 	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4495 	 * store it, and we fill it after the method has been compiled.
4497 	if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4498 		MonoInst *code_slot_ins;
4501 			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
			/* Non-shared path: find or create the per-domain code slot under the domain lock. */
4503 			domain = mono_domain_get ();
4504 			mono_domain_lock (domain);
4505 			if (!domain_jit_info (domain)->method_code_hash)
4506 				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4507 			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4509 				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4510 				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4512 			mono_domain_unlock (domain);
4514 			if (cfg->compile_aot)
4515 				EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4517 				EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4519 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4522 	/* Set invoke_impl field */
4523 	if (cfg->compile_aot) {
4524 		MonoClassMethodPair *del_tramp;
		/* AOT: emit a patchable constant; the method is NULL in shared code since it's only known at run time. */
4526 		del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
4527 		del_tramp->klass = klass;
4528 		del_tramp->method = context_used ? NULL : method;
4529 		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4531 		trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
4532 		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4534 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4536 	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit IR to allocate a multi-dimensional array of the given RANK by
 * calling the mono_array_new_va () icall; SP holds the length (and bound)
 * arguments taken from the evaluation stack.  Returns the emitted call
 * instruction.  IP is the CIL code location of the allocation.
 */
4542 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4544 MonoJitICallInfo *info;
4546 /* Need to register the icall so it gets an icall wrapper */
4547 info = mono_get_array_new_va_icall (rank);
/* mono_array_new_va () is variadic, so the method must carry vararg call support */
4549 cfg->flags |= MONO_CFG_HAS_VARARGS;
4551 /* mono_array_new_va () needs a vararg calling convention */
4552 cfg->disable_llvm = TRUE;
4554 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4555 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by inserting an
 * OP_LOAD_GOTADDR at the very start of the entry basic block, then add a
 * dummy use in the exit block so liveness analysis keeps the variable
 * alive for the whole method.  No-op if there is no got_var or it was
 * already allocated.
 */
4559 mono_emit_load_got_addr (MonoCompile *cfg)
4561 MonoInst *getaddr, *dummy_use;
/* Nothing to do without a GOT variable, or if this was already done */
4563 if (!cfg->got_var || cfg->got_var_allocated)
4566 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4567 getaddr->cil_code = cfg->header->code;
4568 getaddr->dreg = cfg->got_var->dreg;
4570 /* Add it to the start of the first bblock */
4571 if (cfg->bb_entry->code) {
/* Prepend by linking in front of the existing first instruction */
4572 getaddr->next = cfg->bb_entry->code;
4573 cfg->bb_entry->code = getaddr;
4576 MONO_ADD_INS (cfg->bb_entry, getaddr);
4578 cfg->got_var_allocated = TRUE;
4581 * Add a dummy use to keep the got_var alive, since real uses might
4582 * only be generated by the back ends.
4583 * Add it to end_bblock, so the variable's lifetime covers the whole
4585 * It would be better to make the usage of the got var explicit in all
4586 * cases when the backend needs it (i.e. calls, throw etc.), so this
4587 * wouldn't be needed.
4589 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4590 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size eligible for inlining; initialized lazily from the
 * MONO_INLINELIMIT environment variable (falling back to
 * INLINE_LENGTH_LIMIT) in mono_method_check_inlining (). */
4593 static int inline_limit;
4594 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects generic sharing contexts, deep inline nesting, methods whose
 * header summary cannot be obtained, NOINLINING/SYNCHRONIZED/MarshalByRef
 * methods, bodies larger than the (env-configurable) inline size limit,
 * methods whose class still needs its static constructor to run, methods
 * with declarative security (CAS), and, on soft-float targets, methods with
 * R4 parameters or return values.
 *   NOTE(review): several branch bodies/returns are elided in this view of
 * the file; comments below describe only what the visible code shows.
 */
4597 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4599 MonoMethodHeaderSummary header;
4601 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4602 MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining while sharing generic code */
4606 if (cfg->generic_sharing_context)
/* Bound the inline recursion depth */
4609 if (cfg->inline_depth > 10)
4612 #ifdef MONO_ARCH_HAVE_LMF_OPS
4613 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4614 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4615 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4620 if (!mono_method_get_header_summary (method, &header))
4623 /*runtime, icall and pinvoke are checked by summary call*/
4624 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4625 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4626 (mono_class_is_marshalbyref (method->klass)) ||
4630 /* also consider num_locals? */
4631 /* Do the size check early to avoid creating vtables */
4632 if (!inline_limit_inited) {
4633 if (g_getenv ("MONO_INLINELIMIT"))
4634 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
4636 inline_limit = INLINE_LENGTH_LIMIT;
4637 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit */
4639 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4643 * if we can initialize the class of the method right away, we do,
4644 * otherwise we don't allow inlining if the class needs initialization,
4645 * since it would mean inserting a call to mono_runtime_class_init()
4646 * inside the inlined code
4648 if (!(cfg->opt & MONO_OPT_SHARED)) {
4649 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4650 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4651 vtable = mono_class_vtable (cfg->domain, method->klass);
4654 if (!cfg->compile_aot)
4655 mono_runtime_class_init (vtable);
4656 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4657 if (cfg->run_cctors && method->klass->has_cctor) {
4658 /* FIXME: it would be easier and lazier to just use mono_class_try_get_vtable */
4659 if (!method->klass->runtime_info)
4660 /* No vtable created yet */
4662 vtable = mono_class_vtable (cfg->domain, method->klass);
4665 /* This makes it so inlining cannot trigger */
4666 /* .cctors: too many apps depend on them */
4667 /* running with a specific order... */
4668 if (! vtable->initialized)
4670 mono_runtime_class_init (vtable);
4672 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4673 if (!method->klass->runtime_info)
4674 /* No vtable created yet */
4676 vtable = mono_class_vtable (cfg->domain, method->klass);
4679 if (!vtable->initialized)
4684 * If we're compiling for shared code
4685 * the cctor will need to be run at aot method load time, for example,
4686 * or at the end of the compilation of the inlining method.
4688 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4693 * CAS - do not inline methods with declarative security
4694 * Note: this has to be before any possible return TRUE;
4696 if (mono_security_method_has_declsec (method))
4699 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* R4 values would need soft-float emulation inside the inlined body */
4700 if (mono_arch_is_soft_float ()) {
4702 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4704 for (i = 0; i < sig->param_count; ++i)
4705 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a (static) field access in METHOD on class KLASS must be
 * guarded by a class-initialization check.  For JIT (non-AOT) compilation an
 * already-initialized vtable needs no check; BeforeFieldInit classes and
 * accesses from within the class's own non-static methods are also exempt.
 */
4714 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4716 if (!cfg->compile_aot) {
/* The cctor already ran; no runtime check needed */
4718 if (vtable->initialized)
4722 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4723 if (cfg->method == method)
4727 if (!mono_class_needs_cctor_run (klass, method))
4730 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4731 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR of element class KLASS, optionally emitting a bounds check when
 * BCHECK is set.  Returns an instruction of type STACK_MP pointing at the
 * element.  On x86/amd64 a single LEA covers element sizes 1/2/4/8; for
 * gsharedvt variable-size elements the element size is fetched from the
 * runtime generic context instead of being a compile-time constant.
 */
4738 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4742 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
/* Variable-size (gsharedvt) element: size comes from the rgctx below */
4745 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4748 mono_class_init (klass);
4749 size = mono_class_array_element_size (klass);
4752 mult_reg = alloc_preg (cfg);
4753 array_reg = arr->dreg;
4754 index_reg = index->dreg;
4756 #if SIZEOF_REGISTER == 8
4757 /* The array reg is 64 bits but the index reg is only 32 */
4758 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself */
4760 index2_reg = index_reg;
4762 index2_reg = alloc_preg (cfg);
4763 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to 32 bits */
4766 if (index->type == STACK_I8) {
4767 index2_reg = alloc_preg (cfg);
4768 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4770 index2_reg = index_reg;
4775 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4777 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: one LEA of base + (index << log2(size)) + vector offset */
4778 if (size == 1 || size == 2 || size == 4 || size == 8) {
4779 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4781 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4782 ins->klass = mono_class_get_element_class (klass);
4783 ins->type = STACK_MP;
4789 add_reg = alloc_ireg_mp (cfg);
4792 MonoInst *rgctx_ins;
4795 g_assert (cfg->generic_sharing_context);
4796 context_used = mini_class_check_context_used (cfg, klass);
4797 g_assert (context_used);
/* Element size is only known at runtime: load it from the gsharedvt info */
4798 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4799 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = array + index * size + offsetof (MonoArray, vector) */
4803 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4804 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4805 ins->klass = mono_class_get_element_class (klass);
4806 ins->type = STACK_MP;
4807 MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX1, INDEX2] of a rank-2
 * array ARR of element class KLASS.  For each dimension the lower bound is
 * subtracted from the index and the adjusted index is range-checked against
 * the dimension's length (throwing IndexOutOfRangeException), then the flat
 * offset is computed as (realidx1 * len2 + realidx2) * size + vector offset.
 * Requires native mul/div support (guarded by MONO_ARCH_EMULATE_MUL_DIV).
 */
4814 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4816 int bounds_reg = alloc_preg (cfg);
4817 int add_reg = alloc_ireg_mp (cfg);
4818 int mult_reg = alloc_preg (cfg);
4819 int mult2_reg = alloc_preg (cfg);
4820 int low1_reg = alloc_preg (cfg);
4821 int low2_reg = alloc_preg (cfg);
4822 int high1_reg = alloc_preg (cfg);
4823 int high2_reg = alloc_preg (cfg);
4824 int realidx1_reg = alloc_preg (cfg);
4825 int realidx2_reg = alloc_preg (cfg);
4826 int sum_reg = alloc_preg (cfg);
4827 int index1, index2, tmpreg;
4831 mono_class_init (klass);
4832 size = mono_class_array_element_size (klass);
4834 index1 = index_ins1->dreg;
4835 index2 = index_ins2->dreg;
4837 #if SIZEOF_REGISTER == 8
4838 /* The array reg is 64 bits but the index reg is only 32 */
4839 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes to pointer width */
4842 tmpreg = alloc_preg (cfg);
4843 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4845 tmpreg = alloc_preg (cfg);
4846 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4850 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4854 /* range checking */
4855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4856 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; must be < length */
4858 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4859 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4860 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4861 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4862 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4863 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4864 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: second MonoArrayBounds entry follows the first */
4866 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4867 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4868 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4869 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4870 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4871 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4872 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (MonoArray, vector) */
4874 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4875 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4877 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4878 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4880 ins->type = STACK_MP;
4882 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for the array accessor method
 * CMETHOD (Get/Set/Address); SP holds the array followed by the indexes
 * (and, for setters, the value, which is excluded from the rank count).
 * Rank 1 and — when intrinsics are enabled and native mul is available —
 * rank 2 are handled inline; other ranks call a marshalled Address helper.
 */
4889 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4893 MonoMethod *addr_method;
/* For a setter the last signature parameter is the value, not an index */
4896 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4899 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4901 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4902 /* emit_ldelema_2 depends on OP_LMUL */
4903 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4904 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address wrapper for this rank/size */
4908 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4909 addr_method = mono_marshal_get_array_address (rank, element_size);
4910 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request (see mono_set_break_policy ()). */
4915 static MonoBreakPolicy
4916 always_insert_breakpoint (MonoMethod *method)
4918 return MONO_BREAK_POLICY_ALWAYS;
4921 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4924 * mono_set_break_policy:
4925 * policy_callback: the new callback function
4927 * Allow embedders to decide whether to actually obey breakpoint instructions
4928 * (both break IL instructions and Debugger.Break () method calls), for example
4929 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4930 * untrusted or semi-trusted code.
4932 * @policy_callback will be called every time a break point instruction needs to
4933 * be inserted with the method argument being the method that calls Debugger.Break()
4934 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4935 * if it wants the breakpoint to not be effective in the given method.
4936 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; NULL restores the default
 * (always insert breakpoints).  See the preceding comment for semantics. */
4939 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4941 if (policy_callback)
4942 break_policy_func = policy_callback;
4944 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — name kept for existing callers)
 *
 *   Consult the installed break policy for METHOD and report whether a
 * breakpoint should actually be emitted.  ON_DBG and unknown return values
 * produce a warning.
 */
4948 should_insert_brekpoint (MonoMethod *method) {
4949 switch (break_policy_func (method)) {
4950 case MONO_BREAK_POLICY_ALWAYS:
4952 case MONO_BREAK_POLICY_NEVER:
4954 case MONO_BREAK_POLICY_ON_DBG:
4955 g_warning ("mdb no longer supported");
4958 g_warning ("Incorrect value returned from break policy callback");
4963 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls: compute the element
 * address with mini_emit_ldelema_1_ins () (no bounds check — the callers
 * already checked), then copy the value between args [2] and the element.
 * For the set path a write barrier is emitted when the element is a
 * reference type.
 */
4965 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4967 MonoInst *addr, *store, *load;
4968 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4970 /* the bounds check is already done by the callers */
4971 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: element <- *args [2] */
4973 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4974 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4975 if (mini_type_is_reference (cfg, fsig->params [2]))
4976 emit_write_barrier (cfg, addr, load);
/* get: *args [2] <- element */
4978 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4979 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS (possibly a generic parameter) is a reference type
 * in the context of the current compile. */
4986 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4988 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into element SP [1] of array SP [0] with
 * element class KLASS.  With SAFETY_CHECKS, reference-type stores of a
 * possibly non-null value go through the virtual stelemref helper (which
 * performs the array covariance check); otherwise the address is computed
 * inline, with a constant-index fast path and a write barrier for
 * reference elements.
 */
4992 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a null constant never needs the covariance check */
4994 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4995 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4996 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4997 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4998 MonoInst *iargs [3];
5001 mono_class_setup_vtable (obj_array);
5002 g_assert (helper->slot);
5004 if (sp [0]->type != STACK_OBJ)
5006 if (sp [2]->type != STACK_OBJ)
5013 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt element: value-type store through computed address */
5017 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5020 // FIXME-VT: OP_ICONST optimization
5021 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5022 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5023 ins->opcode = OP_STOREV_MEMBASE;
5024 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset at compile time */
5025 int array_reg = sp [0]->dreg;
5026 int index_reg = sp [1]->dreg;
5027 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
5030 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5031 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5033 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5034 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5035 if (generic_class_is_reference_type (cfg, klass))
5036 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline the Array.UnsafeStore/UnsafeLoad icalls: a store delegates to
 * emit_array_store () without safety checks; a load computes the element
 * address without a bounds check and loads from it.  The element class is
 * taken from the value parameter (store) or the return type (load).
 */
5043 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5048 eklass = mono_class_from_mono_type (fsig->params [2]);
5050 eklass = mono_class_from_mono_type (fsig->ret);
5054 return emit_array_store (cfg, eklass, args, FALSE);
5056 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5057 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Return whether an UnsafeMov between PARAM_KLASS and RETURN_KLASS is a
 * plain bit copy: both must be value types, contain no GC references, and
 * have identical value sizes.
 */
5063 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5067 //Only allow for valuetypes
5068 if (!param_klass->valuetype || !return_klass->valuetype)
/* GC references cannot be reinterpreted */
5072 if (param_klass->has_references || return_klass->has_references)
5075 //And have the same size
5076 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov when the parameter and return types are
 * bit-compatible value types (or rank-1 arrays of such), per
 * is_unsafe_mov_compatible ().
 */
5082 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5084 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5085 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5087 //Valuetypes that are semantically equivalent
5088 if (is_unsafe_mov_compatible (param_klass, return_klass))
5091 //Arrays of valuetypes that are semantically equivalent
5092 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a call to constructor CMETHOD with intrinsic IR: SIMD
 * intrinsics first (when MONO_OPT_SIMD is enabled), then native-types
 * intrinsics.  Returns NULL via the fallthrough path if no intrinsic
 * applies (fallthrough lines elided in this view).
 */
5099 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5101 #ifdef MONO_ARCH_SIMD_INTRINSICS
5102 MonoInst *ins = NULL;
5104 if (cfg->opt & MONO_OPT_SIMD) {
5105 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5111 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) to the
 * current basic block and return it.
 */
5115 emit_memory_barrier (MonoCompile *cfg, int kind)
5117 MonoInst *ins = NULL;
5118 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5119 MONO_ADD_INS (cfg->cbb, ins);
5120 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics recognized only when compiling with the LLVM backend:
 * System.Math Sin/Cos/Sqrt/Abs(double) become single float opcodes, and
 * (with MONO_OPT_CMOV) Min/Max on I4/U4/I8/U8 become the corresponding
 * min/max opcodes.  Returns the emitted instruction, or NULL (elided
 * fallthrough) when nothing matches.
 */
5126 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5128 MonoInst *ins = NULL;
5131 /* The LLVM backend supports these intrinsics */
5132 if (cmethod->klass == mono_defaults.math_class) {
5133 if (strcmp (cmethod->name, "Sin") == 0) {
5135 } else if (strcmp (cmethod->name, "Cos") == 0) {
5137 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5139 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary float op: one source, fresh float dreg */
5144 MONO_INST_NEW (cfg, ins, opcode);
5145 ins->type = STACK_R8;
5146 ins->dreg = mono_alloc_freg (cfg);
5147 ins->sreg1 = args [0]->dreg;
5148 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max lower to conditional-move opcodes, so require MONO_OPT_CMOV */
5152 if (cfg->opt & MONO_OPT_CMOV) {
5153 if (strcmp (cmethod->name, "Min") == 0) {
5154 if (fsig->params [0]->type == MONO_TYPE_I4)
5156 if (fsig->params [0]->type == MONO_TYPE_U4)
5157 opcode = OP_IMIN_UN;
5158 else if (fsig->params [0]->type == MONO_TYPE_I8)
5160 else if (fsig->params [0]->type == MONO_TYPE_U8)
5161 opcode = OP_LMIN_UN;
5162 } else if (strcmp (cmethod->name, "Max") == 0) {
5163 if (fsig->params [0]->type == MONO_TYPE_I4)
5165 if (fsig->params [0]->type == MONO_TYPE_U4)
5166 opcode = OP_IMAX_UN;
5167 else if (fsig->params [0]->type == MONO_TYPE_I8)
5169 else if (fsig->params [0]->type == MONO_TYPE_U8)
5170 opcode = OP_LMAX_UN;
/* Binary int op: stack type follows the operand width */
5175 MONO_INST_NEW (cfg, ins, opcode);
5176 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5177 ins->dreg = mono_alloc_ireg (cfg);
5178 ins->sreg1 = args [0]->dreg;
5179 ins->sreg2 = args [1]->dreg;
5180 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic sharing: the System.Array
 * UnsafeStore/UnsafeLoad/UnsafeMov internal methods are replaced with
 * inline IR via the emit_array_unsafe_* helpers.
 */
5188 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5190 if (cmethod->klass == mono_defaults.array_class) {
5191 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5192 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5193 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5194 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5195 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5196 return emit_array_unsafe_mov (cfg, fsig, args);
5203 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5205 MonoInst *ins = NULL;
5207 static MonoClass *runtime_helpers_class = NULL;
5208 if (! runtime_helpers_class)
5209 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5210 "System.Runtime.CompilerServices", "RuntimeHelpers");
5212 if (cmethod->klass == mono_defaults.string_class) {
5213 if (strcmp (cmethod->name, "get_Chars") == 0) {
5214 int dreg = alloc_ireg (cfg);
5215 int index_reg = alloc_preg (cfg);
5216 int mult_reg = alloc_preg (cfg);
5217 int add_reg = alloc_preg (cfg);
5219 #if SIZEOF_REGISTER == 8
5220 /* The array reg is 64 bits but the index reg is only 32 */
5221 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5223 index_reg = args [1]->dreg;
5225 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5227 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5228 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5229 add_reg = ins->dreg;
5230 /* Avoid a warning */
5232 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5235 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5236 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5237 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5238 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5240 type_from_op (ins, NULL, NULL);
5242 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5243 int dreg = alloc_ireg (cfg);
5244 /* Decompose later to allow more optimizations */
5245 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5246 ins->type = STACK_I4;
5247 ins->flags |= MONO_INST_FAULT;
5248 cfg->cbb->has_array_access = TRUE;
5249 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5252 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5253 int mult_reg = alloc_preg (cfg);
5254 int add_reg = alloc_preg (cfg);
5256 /* The corlib functions check for oob already. */
5257 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5258 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5259 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5260 return cfg->cbb->last_ins;
5263 } else if (cmethod->klass == mono_defaults.object_class) {
5265 if (strcmp (cmethod->name, "GetType") == 0) {
5266 int dreg = alloc_ireg_ref (cfg);
5267 int vt_reg = alloc_preg (cfg);
5268 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5269 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5270 type_from_op (ins, NULL, NULL);
5273 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5274 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5275 int dreg = alloc_ireg (cfg);
5276 int t1 = alloc_ireg (cfg);
5278 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5279 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5280 ins->type = STACK_I4;
5284 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5285 MONO_INST_NEW (cfg, ins, OP_NOP);
5286 MONO_ADD_INS (cfg->cbb, ins);
5290 } else if (cmethod->klass == mono_defaults.array_class) {
5291 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5292 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5294 #ifndef MONO_BIG_ARRAYS
5296 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5299 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5300 int dreg = alloc_ireg (cfg);
5301 int bounds_reg = alloc_ireg_mp (cfg);
5302 MonoBasicBlock *end_bb, *szarray_bb;
5303 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5305 NEW_BBLOCK (cfg, end_bb);
5306 NEW_BBLOCK (cfg, szarray_bb);
5308 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5309 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5310 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5311 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5312 /* Non-szarray case */
5314 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5315 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5317 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5318 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5319 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5320 MONO_START_BB (cfg, szarray_bb);
5323 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5324 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5326 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5327 MONO_START_BB (cfg, end_bb);
5329 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5330 ins->type = STACK_I4;
5336 if (cmethod->name [0] != 'g')
5339 if (strcmp (cmethod->name, "get_Rank") == 0) {
5340 int dreg = alloc_ireg (cfg);
5341 int vtable_reg = alloc_preg (cfg);
5342 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5343 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5344 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5345 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5346 type_from_op (ins, NULL, NULL);
5349 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5350 int dreg = alloc_ireg (cfg);
5352 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5353 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5354 type_from_op (ins, NULL, NULL);
5359 } else if (cmethod->klass == runtime_helpers_class) {
5361 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5362 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
5366 } else if (cmethod->klass == mono_defaults.thread_class) {
5367 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5368 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5369 MONO_ADD_INS (cfg->cbb, ins);
5371 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5372 return emit_memory_barrier (cfg, FullBarrier);
5374 } else if (cmethod->klass == mono_defaults.monitor_class) {
5376 /* FIXME this should be integrated to the check below once we support the trampoline version */
5377 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5378 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5379 MonoMethod *fast_method = NULL;
5381 /* Avoid infinite recursion */
5382 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5385 fast_method = mono_monitor_get_fast_path (cmethod);
5389 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5393 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5394 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5397 if (COMPILE_LLVM (cfg)) {
5399 * Pass the argument normally, the LLVM backend will handle the
5400 * calling convention problems.
5402 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5404 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5405 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5406 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5407 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5410 return (MonoInst*)call;
5411 } else if (strcmp (cmethod->name, "Exit") == 0) {
5414 if (COMPILE_LLVM (cfg)) {
5415 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5417 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5418 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5419 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5420 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5423 return (MonoInst*)call;
5425 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5427 MonoMethod *fast_method = NULL;
5429 /* Avoid infinite recursion */
5430 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5431 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5432 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5435 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5436 strcmp (cmethod->name, "Exit") == 0)
5437 fast_method = mono_monitor_get_fast_path (cmethod);
5441 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5444 } else if (cmethod->klass->image == mono_defaults.corlib &&
5445 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5446 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5449 #if SIZEOF_REGISTER == 8
5450 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5451 /* 64 bit reads are already atomic */
5452 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5453 ins->dreg = mono_alloc_preg (cfg);
5454 ins->inst_basereg = args [0]->dreg;
5455 ins->inst_offset = 0;
5456 MONO_ADD_INS (cfg->cbb, ins);
5460 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
5461 if (strcmp (cmethod->name, "Increment") == 0) {
5462 MonoInst *ins_iconst;
5465 if (fsig->params [0]->type == MONO_TYPE_I4) {
5466 opcode = OP_ATOMIC_ADD_NEW_I4;
5467 cfg->has_atomic_add_new_i4 = TRUE;
5469 #if SIZEOF_REGISTER == 8
5470 else if (fsig->params [0]->type == MONO_TYPE_I8)
5471 opcode = OP_ATOMIC_ADD_NEW_I8;
5474 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5475 ins_iconst->inst_c0 = 1;
5476 ins_iconst->dreg = mono_alloc_ireg (cfg);
5477 MONO_ADD_INS (cfg->cbb, ins_iconst);
5479 MONO_INST_NEW (cfg, ins, opcode);
5480 ins->dreg = mono_alloc_ireg (cfg);
5481 ins->inst_basereg = args [0]->dreg;
5482 ins->inst_offset = 0;
5483 ins->sreg2 = ins_iconst->dreg;
5484 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5485 MONO_ADD_INS (cfg->cbb, ins);
5487 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5488 MonoInst *ins_iconst;
5491 if (fsig->params [0]->type == MONO_TYPE_I4) {
5492 opcode = OP_ATOMIC_ADD_NEW_I4;
5493 cfg->has_atomic_add_new_i4 = TRUE;
5495 #if SIZEOF_REGISTER == 8
5496 else if (fsig->params [0]->type == MONO_TYPE_I8)
5497 opcode = OP_ATOMIC_ADD_NEW_I8;
5500 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5501 ins_iconst->inst_c0 = -1;
5502 ins_iconst->dreg = mono_alloc_ireg (cfg);
5503 MONO_ADD_INS (cfg->cbb, ins_iconst);
5505 MONO_INST_NEW (cfg, ins, opcode);
5506 ins->dreg = mono_alloc_ireg (cfg);
5507 ins->inst_basereg = args [0]->dreg;
5508 ins->inst_offset = 0;
5509 ins->sreg2 = ins_iconst->dreg;
5510 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5511 MONO_ADD_INS (cfg->cbb, ins);
5513 } else if (strcmp (cmethod->name, "Add") == 0) {
5516 if (fsig->params [0]->type == MONO_TYPE_I4) {
5517 opcode = OP_ATOMIC_ADD_NEW_I4;
5518 cfg->has_atomic_add_new_i4 = TRUE;
5520 #if SIZEOF_REGISTER == 8
5521 else if (fsig->params [0]->type == MONO_TYPE_I8)
5522 opcode = OP_ATOMIC_ADD_NEW_I8;
5526 MONO_INST_NEW (cfg, ins, opcode);
5527 ins->dreg = mono_alloc_ireg (cfg);
5528 ins->inst_basereg = args [0]->dreg;
5529 ins->inst_offset = 0;
5530 ins->sreg2 = args [1]->dreg;
5531 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5532 MONO_ADD_INS (cfg->cbb, ins);
5535 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5537 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5538 if (strcmp (cmethod->name, "Exchange") == 0) {
5540 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5542 if (fsig->params [0]->type == MONO_TYPE_I4) {
5543 opcode = OP_ATOMIC_EXCHANGE_I4;
5544 cfg->has_atomic_exchange_i4 = TRUE;
5546 #if SIZEOF_REGISTER == 8
5547 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5548 (fsig->params [0]->type == MONO_TYPE_I))
5549 opcode = OP_ATOMIC_EXCHANGE_I8;
5551 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5552 opcode = OP_ATOMIC_EXCHANGE_I4;
5553 cfg->has_atomic_exchange_i4 = TRUE;
5559 MONO_INST_NEW (cfg, ins, opcode);
5560 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5561 ins->inst_basereg = args [0]->dreg;
5562 ins->inst_offset = 0;
5563 ins->sreg2 = args [1]->dreg;
5564 MONO_ADD_INS (cfg->cbb, ins);
5566 switch (fsig->params [0]->type) {
5568 ins->type = STACK_I4;
5572 ins->type = STACK_I8;
5574 case MONO_TYPE_OBJECT:
5575 ins->type = STACK_OBJ;
5578 g_assert_not_reached ();
5581 if (cfg->gen_write_barriers && is_ref)
5582 emit_write_barrier (cfg, args [0], args [1]);
5584 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5586 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5587 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5589 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5590 if (fsig->params [1]->type == MONO_TYPE_I4)
5592 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5593 size = sizeof (gpointer);
5594 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5597 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5598 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5599 ins->sreg1 = args [0]->dreg;
5600 ins->sreg2 = args [1]->dreg;
5601 ins->sreg3 = args [2]->dreg;
5602 ins->type = STACK_I4;
5603 MONO_ADD_INS (cfg->cbb, ins);
5604 } else if (size == 8) {
5605 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5606 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5607 ins->sreg1 = args [0]->dreg;
5608 ins->sreg2 = args [1]->dreg;
5609 ins->sreg3 = args [2]->dreg;
5610 ins->type = STACK_I8;
5611 MONO_ADD_INS (cfg->cbb, ins);
5613 /* g_assert_not_reached (); */
5615 if (cfg->gen_write_barriers && is_ref)
5616 emit_write_barrier (cfg, args [0], args [1]);
5618 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5620 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5621 ins = emit_memory_barrier (cfg, FullBarrier);
5625 } else if (cmethod->klass->image == mono_defaults.corlib) {
5626 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5627 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5628 if (should_insert_brekpoint (cfg->method)) {
5629 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5631 MONO_INST_NEW (cfg, ins, OP_NOP);
5632 MONO_ADD_INS (cfg->cbb, ins);
5636 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5637 && strcmp (cmethod->klass->name, "Environment") == 0) {
5639 EMIT_NEW_ICONST (cfg, ins, 1);
5641 EMIT_NEW_ICONST (cfg, ins, 0);
5645 } else if (cmethod->klass == mono_defaults.math_class) {
5647 * There is general branches code for Min/Max, but it does not work for
5649 * http://everything2.com/?node_id=1051618
5651 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5652 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5654 MonoJumpInfoToken *ji;
5657 cfg->disable_llvm = TRUE;
5659 if (args [0]->opcode == OP_GOT_ENTRY) {
5660 pi = args [0]->inst_p1;
5661 g_assert (pi->opcode == OP_PATCH_INFO);
5662 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5665 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5666 ji = args [0]->inst_p0;
5669 NULLIFY_INS (args [0]);
5672 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5673 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5674 ins->dreg = mono_alloc_ireg (cfg);
5676 ins->inst_p0 = mono_string_to_utf8 (s);
5677 MONO_ADD_INS (cfg->cbb, ins);
5682 #ifdef MONO_ARCH_SIMD_INTRINSICS
5683 if (cfg->opt & MONO_OPT_SIMD) {
5684 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5690 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5694 if (COMPILE_LLVM (cfg)) {
5695 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5700 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5704 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a specialized JIT implementation when one
 * is available. Currently this only handles String.InternalAllocateStr,
 * which is rewritten into a direct call to the managed GC allocator.
 * Returns the emitted call instruction when redirection applies.
 */
5707 inline static MonoInst*
5708 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5709 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5711 if (method->klass == mono_defaults.string_class) {
5712 /* managed string allocation support */
/* The fast path is disabled when allocation profiling is on or when
 * compiling with MONO_OPT_SHARED (domain-neutral code cannot burn in a
 * per-domain vtable). */
5713 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5714 MonoInst *iargs [2];
5715 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5716 MonoMethod *managed_alloc = NULL;
5718 g_assert (vtable); /* Should not fail since it is System.String */
5719 #ifndef MONO_CROSS_COMPILE
5720 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, length). */
5724 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5725 iargs [1] = args [0];
5726 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create an OP_LOCAL variable for each argument (including the implicit
 * 'this') of a method being inlined and emit stores of the incoming stack
 * values SP into them, so the inlined body can reference cfg->args [i].
 */
5733 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5735 MonoInst *store, *temp;
5738 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params, so derive its type from the stack slot. */
5739 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5742 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5743 * would be different than the MonoInst's used to represent arguments, and
5744 * the ldelema implementation can't deal with that.
5745 * Solution: When ldelema is used on an inline argument, create a var for
5746 * it, emit ldelema on that var, and emit the saving code below in
5747 * inline_method () if needed.
5749 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5750 cfg->args [i] = temp;
5751 /* This uses cfg->args [i] which is set by the preceding line */
5752 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5753 store->cil_code = sp [0]->cil_code;
5758 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5759 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5761 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: restrict inlining to callees whose full name starts with
 * the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable. Returns TRUE when CALLED_METHOD is allowed to be inlined.
 */
5763 check_inline_called_method_name_limit (MonoMethod *called_method)
5766 static const char *limit = NULL;
/* Cache the env lookup in a function-local static on first use.
 * NOTE(review): unsynchronized; presumably acceptable for a debug-only knob. */
5768 if (limit == NULL) {
5769 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5771 if (limit_string != NULL)
5772 limit = limit_string;
5777 if (limit [0] != '\0') {
5778 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars are checked. */
5780 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5781 g_free (called_method_name);
5783 //return (strncmp_result <= 0);
5784 return (strncmp_result == 0);
5791 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: restrict inlining to call sites whose caller's full name
 * starts with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * Returns TRUE when inlining into CALLER_METHOD is allowed.
 */
5793 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5796 static const char *limit = NULL;
/* Same lazy env-var caching pattern as check_inline_called_method_name_limit. */
5798 if (limit == NULL) {
5799 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5800 if (limit_string != NULL) {
5801 limit = limit_string;
5807 if (limit [0] != '\0') {
5808 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5810 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5811 g_free (caller_method_name);
5813 //return (strncmp_result <= 0);
5814 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing register DREG, which holds a value of type RTYPE,
 * to the type's default value: NULL for pointers/references, 0/0.0 for
 * numeric types, and a zeroed valuetype for structs.
 */
5822 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Backing storage for the 0.0 constant; OP_R8CONST references it by address. */
5824 static double r8_0 = 0.0;
5828 rtype = mini_replace_type (rtype);
5832 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5833 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5834 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5835 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5836 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5837 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5838 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5839 ins->type = STACK_R8;
5840 ins->inst_p0 = (void*)&r8_0;
5842 MONO_ADD_INS (cfg->cbb, ins);
5843 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5844 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5845 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Generic type params constrained to valuetypes also need a VZERO. */
5846 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5847 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is a reference type and defaults to NULL. */
5849 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar, but emit OP_DUMMY_* initializations which keep the
 * IR valid (every register defined) without generating real code. Falls back
 * to emit_init_rvar for types with no dummy opcode.
 */
5854 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5858 rtype = mini_replace_type (rtype);
5862 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5863 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5864 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5865 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5866 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5867 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5868 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5869 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5870 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5871 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5872 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5873 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization instead. */
5875 emit_init_rvar (cfg, dreg, rtype);
5879 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize IL local variable LOCAL of type TYPE to its default value,
 * either for real (INIT == TRUE) or as a dummy definition.
 */
5881 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5883 MonoInst *var = cfg->locals [local];
5884 if (COMPILE_SOFT_FLOAT (cfg)) {
/* Under soft float, initialize a fresh register then store it into the
 * local so the soft-float decomposition sees a normal store. */
5886 int reg = alloc_dreg (cfg, var->type);
5887 emit_init_rvar (cfg, reg, type);
5888 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5891 emit_init_rvar (cfg, var->dreg, type);
5893 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Attempt to inline CMETHOD at the call site described by IP/SP inside the
 * method currently being compiled. Saves the relevant per-method compiler
 * state, recursively runs mono_method_to_ir () on the callee between freshly
 * allocated start/end bblocks, then either links the inlined blocks into the
 * caller (on success) or discards them (on failure/abort). RVAR, when the
 * callee is non-void, receives the return value. INLINE_ALWAYS forces the
 * inline regardless of the cost limit. Returns nonzero on success
 * (tail/return code not fully visible in this view).
 */
5898 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5899 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5901 MonoInst *ins, *rvar = NULL;
5902 MonoMethodHeader *cheader;
5903 MonoBasicBlock *ebblock, *sbblock;
5905 MonoMethod *prev_inlined_method;
5906 MonoInst **prev_locals, **prev_args;
5907 MonoType **prev_arg_types;
5908 guint prev_real_offset;
5909 GHashTable *prev_cbb_hash;
5910 MonoBasicBlock **prev_cil_offset_to_bb;
5911 MonoBasicBlock *prev_cbb;
5912 unsigned char* prev_cil_start;
5913 guint32 prev_cil_offset_to_bb_len;
5914 MonoMethod *prev_current_method;
5915 MonoGenericContext *prev_generic_context;
5916 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5918 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names (see the
 * check_inline_*_method_name_limit helpers). */
5920 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5921 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5924 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5925 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5929 if (cfg->verbose_level > 2)
5930 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct method only once in the inlineable-methods stat. */
5932 if (!cmethod->inline_info) {
5933 cfg->stat_inlineable_methods++;
5934 cmethod->inline_info = 1;
5937 /* allocate local variables */
5938 cheader = mono_method_get_header (cmethod);
5940 if (cheader == NULL || mono_loader_get_last_error ()) {
5941 MonoLoaderError *error = mono_loader_get_last_error ();
5944 mono_metadata_free_mh (cheader);
5945 if (inline_always && error)
5946 mono_cfg_set_exception (cfg, error->exception_type);
5948 mono_loader_clear_error ();
5952 /*Must verify before creating locals as it can cause the JIT to assert.*/
5953 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5954 mono_metadata_free_mh (cheader);
5958 /* allocate space to store the return value */
5959 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5960 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in fresh locals for the callee; the caller's are restored below. */
5963 prev_locals = cfg->locals;
5964 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5965 for (i = 0; i < cheader->num_locals; ++i)
5966 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5968 /* allocate start and end blocks */
5969 /* This is needed so if the inline is aborted, we can clean up */
5970 NEW_BBLOCK (cfg, sbblock);
5971 sbblock->real_offset = real_offset;
5973 NEW_BBLOCK (cfg, ebblock);
5974 ebblock->block_num = cfg->num_bblocks++;
5975 ebblock->real_offset = real_offset;
/* Save every piece of per-method compiler state that mono_method_to_ir ()
 * will overwrite while compiling the callee. */
5977 prev_args = cfg->args;
5978 prev_arg_types = cfg->arg_types;
5979 prev_inlined_method = cfg->inlined_method;
5980 cfg->inlined_method = cmethod;
5981 cfg->ret_var_set = FALSE;
5982 cfg->inline_depth ++;
5983 prev_real_offset = cfg->real_offset;
5984 prev_cbb_hash = cfg->cbb_hash;
5985 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5986 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5987 prev_cil_start = cfg->cil_start;
5988 prev_cbb = cfg->cbb;
5989 prev_current_method = cfg->current_method;
5990 prev_generic_context = cfg->generic_context;
5991 prev_ret_var_set = cfg->ret_var_set;
/* callvirt on an instance method needs an explicit null check on 'this'. */
5993 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5996 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5998 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state. */
6000 cfg->inlined_method = prev_inlined_method;
6001 cfg->real_offset = prev_real_offset;
6002 cfg->cbb_hash = prev_cbb_hash;
6003 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6004 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6005 cfg->cil_start = prev_cil_start;
6006 cfg->locals = prev_locals;
6007 cfg->args = prev_args;
6008 cfg->arg_types = prev_arg_types;
6009 cfg->current_method = prev_current_method;
6010 cfg->generic_context = prev_generic_context;
6011 cfg->ret_var_set = prev_ret_var_set;
6012 cfg->inline_depth --;
/* Accept the inline when the cost is under the limit (or forced). */
6014 if ((costs >= 0 && costs < 60) || inline_always) {
6015 if (cfg->verbose_level > 2)
6016 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6018 cfg->stat_inlined_methods++;
6020 /* always add some code to avoid block split failures */
6021 MONO_INST_NEW (cfg, ins, OP_NOP);
6022 MONO_ADD_INS (prev_cbb, ins);
6024 prev_cbb->next_bb = sbblock;
6025 link_bblock (cfg, prev_cbb, sbblock);
6028 * Get rid of the begin and end bblocks if possible to aid local
6031 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6033 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6034 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6036 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6037 MonoBasicBlock *prev = ebblock->in_bb [0];
6038 mono_merge_basic_blocks (cfg, prev, ebblock);
6040 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6041 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6042 cfg->cbb = prev_cbb;
6046 * It's possible that the rvar is set in some prev bblock, but not in others.
6052 for (i = 0; i < ebblock->in_count; ++i) {
6053 bb = ebblock->in_bb [i];
6055 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6058 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6068 * If the inlined method contains only a throw, then the ret var is not
6069 * set, so set it to a dummy value.
6072 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6074 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* The header is kept alive for the lifetime of the compile. */
6077 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: cost too high or the callee raised a compile-time problem. */
6080 if (cfg->verbose_level > 2)
6081 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6082 cfg->exception_type = MONO_EXCEPTION_NONE;
6083 mono_loader_clear_error ();
6085 /* This gets rid of the newly added bblocks */
6086 cfg->cbb = prev_cbb;
6088 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6093 * Some of these comments may well be out-of-date.
6094 * Design decisions: we do a single pass over the IL code (and we do bblock
6095 * splitting/merging in the few cases when it's required: a back jump to an IL
6096 * address that was not already seen as bblock starting point).
6097 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6098 * Complex operations are decomposed in simpler ones right away. We need to let the
6099 * arch-specific code peek and poke inside this process somehow (except when the
6100 * optimizations can take advantage of the full semantic info of coarse opcodes).
6101 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6102 * MonoInst->opcode initially is the IL opcode or some simplification of that
6103 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6104 * opcode with value bigger than OP_LAST.
6105 * At this point the IR can be handed over to an interpreter, a dumb code generator
6106 * or to the optimizing code generator that will translate it to SSA form.
6108 * Profiling directed optimizations.
6109 * We may compile by default with few or no optimizations and instrument the code
6110 * or the user may indicate what methods to optimize the most either in a config file
6111 * or through repeated runs where the compiler applies offline the optimizations to
6112 * each method and then decides if it was worth it.
6115 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6116 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6117 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6118 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6119 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6120 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6121 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6122 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
6124 /* offset from br.s -> br like opcodes */
6125 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE when the IL address IP does not start a basic block other
 * than BB, i.e. an optimization peeking at IP can stay within BB.
 */
6128 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6130 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6132 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) once, decoding each opcode just enough to
 * find its length and branch targets, and create a basic block (GET_BBLOCK)
 * at every branch target and fall-through point. Also marks the block
 * containing a 'throw' as out-of-line so it can be moved to cold code.
 */
6136 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6138 unsigned char *ip = start;
6139 unsigned char *target;
6142 MonoBasicBlock *bblock;
6143 const MonoOpcode *opcode;
6146 cli_addr = ip - start;
6147 i = mono_opcode_value ((const guint8 **)&ip, end);
6150 opcode = &mono_opcodes [i];
/* Advance over the operand; only branch-like operands create bblocks. */
6151 switch (opcode->argument) {
6152 case MonoInlineNone:
6155 case MonoInlineString:
6156 case MonoInlineType:
6157 case MonoInlineField:
6158 case MonoInlineMethod:
6161 case MonoShortInlineR:
6168 case MonoShortInlineVar:
6169 case MonoShortInlineI:
6172 case MonoShortInlineBrTarget:
/* 8-bit signed displacement, relative to the end of the 2-byte opcode. */
6173 target = start + cli_addr + 2 + (signed char)ip [1];
6174 GET_BBLOCK (cfg, bblock, target);
6177 GET_BBLOCK (cfg, bblock, ip);
6179 case MonoInlineBrTarget:
/* 32-bit signed displacement, relative to the end of the 5-byte opcode. */
6180 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6181 GET_BBLOCK (cfg, bblock, target);
6184 GET_BBLOCK (cfg, bblock, ip);
6186 case MonoInlineSwitch: {
6187 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch instruction. */
6190 cli_addr += 5 + 4 * n;
6191 target = start + cli_addr;
6192 GET_BBLOCK (cfg, bblock, target);
6194 for (j = 0; j < n; ++j) {
6195 target = start + cli_addr + (gint32)read32 (ip);
6196 GET_BBLOCK (cfg, bblock, target);
6206 g_assert_not_reached ();
6209 if (i == CEE_THROW) {
6210 unsigned char *bb_start = ip - 1;
6212 /* Find the start of the bblock containing the throw */
6214 while ((bb_start >= start) && !bblock) {
6215 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6219 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 * constructed types. Wrapper methods store their data out-of-band, so the
 * token is looked up in the wrapper data instead of the metadata tables.
 */
6229 static inline MonoMethod *
6230 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6234 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6235 method = mono_method_get_wrapper_data (m, token);
6237 method = mono_class_inflate_generic_method (method, context);
6239 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open, but when not compiling shared code an
 * open constructed declaring type is rejected (the branch visible below
 * fires when the class is still open without a generic sharing context).
 */
6245 static inline MonoMethod *
6246 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6248 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6250 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, going through the
 * wrapper data for wrapper methods, and initialize the class before
 * returning it.
 */
6256 static inline MonoClass*
6257 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6261 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6262 klass = mono_method_get_wrapper_data (method, token);
6264 klass = mono_class_inflate_generic_class (klass, context);
6266 klass = mono_class_get_full (method->klass->image, token, context);
6269 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD,
 * inflating it with CONTEXT for wrapper-stored signatures.
 */
6273 static inline MonoMethodSignature*
6274 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6276 MonoMethodSignature *fsig;
6278 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6281 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6283 fsig = mono_inflate_generic_signature (fsig, context, &error);
6285 g_assert (mono_error_ok (&error));
6288 fsig = mono_metadata_parse_signature (method->klass->image, token);
6294 * Returns TRUE if the JIT should abort inlining because "callee"
6295 * is influenced by security attributes.
6298 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Declarative security checks only matter for inlined calls (cfg->method !=
 * caller) into methods that carry declsec metadata. */
6302 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6306 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6307 if (result == MONO_JIT_SECURITY_OK)
6310 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6311 /* Generate code to throw a SecurityException before the actual call/link */
6312 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6315 NEW_ICONST (cfg, args [0], 4);
6316 NEW_METHODCONST (cfg, args [1], caller);
6317 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6318 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6319 /* don't hide previous results */
6320 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6321 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return SecurityManager.ThrowException, looked up lazily and cached in a
 * function-local static. NOTE(review): the cache write is unguarded;
 * presumably benign if raced since both threads resolve the same method.
 */
6329 throw_exception (void)
6331 static MonoMethod *method = NULL;
6334 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6335 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * position, so EX is raised at runtime when this code path executes.
 */
6342 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6344 MonoMethod *thrower = throw_exception ();
6347 EMIT_NEW_PCONST (cfg, args [0], ex);
6348 mono_emit_method_call (cfg, thrower, args, NULL);
6352 * Return the original method if a wrapper is specified. We can only access
6353 * the custom attributes from the original method.
6356 get_original_method (MonoMethod *method)
6358 if (method->wrapper_type == MONO_WRAPPER_NONE)
6361 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6362 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6365 /* in other cases we need to find the original method */
6366 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: emit a throw of the returned exception when CALLER is
 * not allowed to access FIELD.
 */
6370 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6371 MonoBasicBlock *bblock, unsigned char *ip)
6373 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6374 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6376 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: emit a throw of the returned exception when CALLER is
 * not allowed to call CALLEE.
 */
6380 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6381 MonoBasicBlock *bblock, unsigned char *ip)
6383 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6384 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6386 emit_throw_exception (cfg, ex);
6390 * Check that the IL instructions at ip are the array initialization
6391 * sequence and return the pointer to the data and the size.
6394 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6397 * newarr[System.Int32]
6399 * ldtoken field valuetype ...
6400 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Pattern-match dup; ldtoken <field>; call: ip[5] == 0x4 checks the token's
 * table byte (a FieldDef token), ip[6] the following call opcode. */
6402 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6403 guint32 token = read32 (ip + 7);
6404 guint32 field_token = read32 (ip + 2);
6405 guint32 field_index = field_token & 0xffffff;
6407 const char *data_ptr;
6409 MonoMethod *cmethod;
6410 MonoClass *dummy_class;
6411 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6417 *out_field_token = field_token;
6419 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real RuntimeHelpers.InitializeArray from corlib qualifies. */
6422 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6424 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6425 case MONO_TYPE_BOOLEAN:
6429 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6430 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6431 case MONO_TYPE_CHAR:
/* The blob must be at least as large as the array being initialized. */
6448 if (size > mono_type_size (field->type, &dummy_align))
6451 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6452 if (!method->klass->image->dynamic) {
6453 field_index = read32 (ip + 2) & 0xffffff;
6454 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6455 data_ptr = mono_image_rva_map (method->klass->image, rva);
6456 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6457 /* for aot code we do the lookup on load */
6458 if (aot && data_ptr)
6459 return GUINT_TO_POINTER (rva);
6461 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images: the data lives on the field object itself. */
6463 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming METHOD
 * and disassembling the offending instruction at IP.
 */
6471 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6473 char *method_fname = mono_method_full_name (method, TRUE);
6475 MonoMethodHeader *header = mono_method_get_header (method);
6477 if (header->code_size == 0)
6478 method_code = g_strdup ("method body is empty.");
6480 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6481 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6482 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6483 g_free (method_fname);
6484 g_free (method_code);
/* Keep the header alive until the compile finishes. */
6485 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-constructed managed EXCEPTION on CFG. The exception_ptr
 * slot is registered as a GC root so the object survives until it is thrown.
 */
6489 set_exception_object (MonoCompile *cfg, MonoException *exception)
6491 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6492 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6493 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into IL local N, optimizing away the
 * reg-reg move when the value was just produced by a constant load.
 */
6497 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6500 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6501 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6502 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6503 /* Optimize reg-reg moves away */
6505 * Can't optimize other opcodes, since sp[0] might point to
6506 * the last ins of a decomposed opcode.
/* Retarget the constant load straight into the local's register. */
6508 sp [0]->dreg = (cfg)->locals [n]->dreg;
6510 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6515 * ldloca inhibits many optimizations so try to get rid of it in common
/* Recognizes ldloca followed immediately by initobj in the same bblock and
 * replaces the pair with a direct zero-initialization of the local, avoiding
 * taking the local's address. Returns the IL pointer past the consumed
 * sequence (return statements not visible in this view). */
6518 static inline unsigned char *
6519 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6529 local = read16 (ip + 2);
6533 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6534 /* From the INITOBJ case */
6535 token = read32 (ip + 2);
6536 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6537 CHECK_TYPELOAD (klass);
6538 type = mini_replace_type (&klass->byval_arg);
6539 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Walk up the parent chain of CLASS checking whether it derives from
 * System.Exception.
 */
6547 is_exception_class (MonoClass *class)
6550 if (class == mono_defaults.exception_class)
6552 class = class->parent;
6558 * is_jit_optimizer_disabled:
6560 * Determine whether M's assembly has a DebuggableAttribute with the
6561 * IsJITOptimizerDisabled flag set.
6564 is_jit_optimizer_disabled (MonoMethod *m)
6566 MonoAssembly *ass = m->klass->image->assembly;
6567 MonoCustomAttrInfo* attrs;
6568 static MonoClass *klass;
6570 gboolean val = FALSE;
/* The answer is cached per assembly; the inited flag is published after the
 * value with a memory barrier in between (see the stores below). */
6573 if (ass->jit_optimizer_disabled_inited)
6574 return ass->jit_optimizer_disabled;
6577 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
6580 ass->jit_optimizer_disabled = FALSE;
6581 mono_memory_barrier ();
6582 ass->jit_optimizer_disabled_inited = TRUE;
6586 attrs = mono_custom_attrs_from_assembly (ass);
6588 for (i = 0; i < attrs->num_attrs; ++i) {
6589 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6592 MonoMethodSignature *sig;
6594 if (!attr->ctor || attr->ctor->klass != klass)
6596 /* Decode the attribute. See reflection.c */
6597 len = attr->data_size;
6598 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
6599 g_assert (read16 (p) == 0x0001);
6602 // FIXME: Support named parameters
6603 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute (bool, bool) constructor is understood. */
6604 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6606 /* Two boolean arguments */
6610 mono_custom_attrs_free (attrs);
6613 ass->jit_optimizer_disabled = val;
6614 mono_memory_barrier ();
6615 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call from METHOD to CMETHOD (signature FSIG, IL
 * opcode CALL_OPCODE) can be compiled as a tail call. Starts from the
 * architecture's verdict, then applies a series of vetoes for cases where
 * the callee could observe or outlive the caller's stack frame.
 */
6621 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6623 gboolean supported_tail_call;
6626 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6627 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod))
6629 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6632 for (i = 0; i < fsig->param_count; ++i) {
6633 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6634 /* These can point to the current method's stack */
6635 supported_tail_call = FALSE;
6637 if (fsig->hasthis && cmethod->klass->valuetype)
6638 /* this might point to the current method's stack */
6639 supported_tail_call = FALSE;
6640 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6641 supported_tail_call = FALSE;
6642 if (cfg->method->save_lmf)
6643 supported_tail_call = FALSE;
6644 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6645 supported_tail_call = FALSE;
6646 if (call_opcode != CEE_CALL)
6647 supported_tail_call = FALSE;
6649 /* Debugging support */
6651 if (supported_tail_call) {
6652 if (!mono_debug_count ())
6653 supported_tail_call = FALSE;
6657 return supported_tail_call;
6660 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6661 * it to the thread local value based on the tls_offset field. Every other kind of access to
6662 * the field causes an assert.
/* Returns TRUE iff FIELD is corlib's ThreadLocal`1.tlsdata (the strcmp
 * calls return 0 on match, so a nonzero result bails out early). */
6665 is_magic_tls_access (MonoClassField *field)
6667 if (strcmp (field->name, "tlsdata"))
6669 if (strcmp (field->parent->name, "ThreadLocal`1"))
6671 return field->parent->image == mono_defaults.corlib;
6674 /* emits the code needed to access a managed tls var (like ThreadStatic)
6675 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6676 * pointer for the current thread.
6677 * Returns the MonoInst* representing the address of the tls var.
6680 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6683 int static_data_reg, array_reg, dreg;
6684 int offset2_reg, idx_reg;
6685 // inlined access to the tls data
6686 // idx = (offset >> 24) - 1;
6687 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6688 static_data_reg = alloc_ireg (cfg);
6689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6690 idx_reg = alloc_ireg (cfg);
/* idx = (offset >> 24) - 1, then scale by sizeof (gpointer) to index the
 * static_data pointer array. */
6691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6692 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6694 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6695 array_reg = alloc_ireg (cfg);
6696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6697 offset2_reg = alloc_ireg (cfg);
/* Low 24 bits of the offset index into the selected chunk. */
6698 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6699 dreg = alloc_ireg (cfg);
6700 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6705 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6706 * this address is cached per-method in cached_tls_addr.
6709 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6711 MonoInst *load, *addr, *temp, *store, *thread_ins;
6712 MonoClassField *offset_field;
/* Fast path: the tls address was already computed in this method; reload the cached temp. */
6714 if (*cached_tls_addr) {
6715 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Try the per-arch intrinsic that yields the MonoInternalThread pointer directly. */
6718 thread_ins = mono_get_thread_intrinsic (cfg);
/* Load ThreadLocal`1.tls_offset from the object whose address is in thread_local. */
6719 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6721 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
/* NOTE(review): this add appears guarded by an elided check that thread_ins is non-NULL — confirm. */
6723 MONO_ADD_INS (cfg->cbb, thread_ins);
/* Fallback when no intrinsic exists: call Thread.CurrentInternalThread_internal (). */
6725 MonoMethod *thread_method;
6726 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6727 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the address of the tls var and give it managed-pointer stack type. */
6729 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6730 addr->klass = mono_class_from_mono_type (tls_field->type);
6731 addr->type = STACK_MP;
/* Cache the address in a local temp so later accesses in this method take the fast path. */
6732 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6733 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6735 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6740 * mono_method_to_ir:
6742 * Translate the .net IL into linear IR.
6745 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6746 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6747 guint inline_offset, gboolean is_virtual_call)
6750 MonoInst *ins, **sp, **stack_start;
6751 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6752 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6753 MonoMethod *cmethod, *method_definition;
6754 MonoInst **arg_array;
6755 MonoMethodHeader *header;
6757 guint32 token, ins_flag;
6759 MonoClass *constrained_call = NULL;
6760 unsigned char *ip, *end, *target, *err_pos;
6761 MonoMethodSignature *sig;
6762 MonoGenericContext *generic_context = NULL;
6763 MonoGenericContainer *generic_container = NULL;
6764 MonoType **param_types;
6765 int i, n, start_new_bblock, dreg;
6766 int num_calls = 0, inline_costs = 0;
6767 int breakpoint_id = 0;
6769 MonoBoolean security, pinvoke;
6770 MonoSecurityManager* secman = NULL;
6771 MonoDeclSecurityActions actions;
6772 GSList *class_inits = NULL;
6773 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6775 gboolean init_locals, seq_points, skip_dead_blocks;
6776 gboolean disable_inline, sym_seq_points = FALSE;
6777 MonoInst *cached_tls_addr = NULL;
6778 MonoDebugMethodInfo *minfo;
6779 MonoBitSet *seq_point_locs = NULL;
6780 MonoBitSet *seq_point_set_locs = NULL;
6782 disable_inline = is_jit_optimizer_disabled (method);
6784 /* serialization and xdomain stuff may need access to private fields and methods */
6785 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6786 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6787 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6788 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6789 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6790 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6792 dont_verify |= mono_security_smcs_hack_enabled ();
6794 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6795 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6796 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6797 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6798 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6800 image = method->klass->image;
6801 header = mono_method_get_header (method);
6803 MonoLoaderError *error;
6805 if ((error = mono_loader_get_last_error ())) {
6806 mono_cfg_set_exception (cfg, error->exception_type);
6808 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6809 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6811 goto exception_exit;
6813 generic_container = mono_method_get_generic_container (method);
6814 sig = mono_method_signature (method);
6815 num_args = sig->hasthis + sig->param_count;
6816 ip = (unsigned char*)header->code;
6817 cfg->cil_start = ip;
6818 end = ip + header->code_size;
6819 cfg->stat_cil_code_size += header->code_size;
6821 seq_points = cfg->gen_seq_points && cfg->method == method;
6822 #ifdef PLATFORM_ANDROID
6823 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6826 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6827 /* We could hit a seq point before attaching to the JIT (#8338) */
6831 if (cfg->gen_seq_points && cfg->method == method) {
6832 minfo = mono_debug_lookup_method (method);
6834 int i, n_il_offsets;
6838 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6839 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6840 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6841 sym_seq_points = TRUE;
6842 for (i = 0; i < n_il_offsets; ++i) {
6843 if (il_offsets [i] < header->code_size)
6844 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6846 g_free (il_offsets);
6847 g_free (line_numbers);
6852 * Methods without init_locals set could cause asserts in various passes
6853 * (#497220). To work around this, we emit dummy initialization opcodes
6854 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6855 * on some platforms.
6857 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6858 init_locals = header->init_locals;
6862 method_definition = method;
6863 while (method_definition->is_inflated) {
6864 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6865 method_definition = imethod->declaring;
6868 /* SkipVerification is not allowed if core-clr is enabled */
6869 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6871 dont_verify_stloc = TRUE;
6874 if (sig->is_inflated)
6875 generic_context = mono_method_get_context (method);
6876 else if (generic_container)
6877 generic_context = &generic_container->context;
6878 cfg->generic_context = generic_context;
6880 if (!cfg->generic_sharing_context)
6881 g_assert (!sig->has_type_parameters);
6883 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6884 g_assert (method->is_inflated);
6885 g_assert (mono_method_get_context (method)->method_inst);
6887 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6888 g_assert (sig->generic_param_count);
6890 if (cfg->method == method) {
6891 cfg->real_offset = 0;
6893 cfg->real_offset = inline_offset;
6896 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6897 cfg->cil_offset_to_bb_len = header->code_size;
6899 cfg->current_method = method;
6901 if (cfg->verbose_level > 2)
6902 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6904 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6906 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6907 for (n = 0; n < sig->param_count; ++n)
6908 param_types [n + sig->hasthis] = sig->params [n];
6909 cfg->arg_types = param_types;
6911 dont_inline = g_list_prepend (dont_inline, method);
6912 if (cfg->method == method) {
6914 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6915 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6918 NEW_BBLOCK (cfg, start_bblock);
6919 cfg->bb_entry = start_bblock;
6920 start_bblock->cil_code = NULL;
6921 start_bblock->cil_length = 0;
6922 #if defined(__native_client_codegen__)
6923 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6924 ins->dreg = alloc_dreg (cfg, STACK_I4);
6925 MONO_ADD_INS (start_bblock, ins);
6929 NEW_BBLOCK (cfg, end_bblock);
6930 cfg->bb_exit = end_bblock;
6931 end_bblock->cil_code = NULL;
6932 end_bblock->cil_length = 0;
6933 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6934 g_assert (cfg->num_bblocks == 2);
6936 arg_array = cfg->args;
6938 if (header->num_clauses) {
6939 cfg->spvars = g_hash_table_new (NULL, NULL);
6940 cfg->exvars = g_hash_table_new (NULL, NULL);
6942 /* handle exception clauses */
6943 for (i = 0; i < header->num_clauses; ++i) {
6944 MonoBasicBlock *try_bb;
6945 MonoExceptionClause *clause = &header->clauses [i];
6946 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6947 try_bb->real_offset = clause->try_offset;
6948 try_bb->try_start = TRUE;
6949 try_bb->region = ((i + 1) << 8) | clause->flags;
6950 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6951 tblock->real_offset = clause->handler_offset;
6952 tblock->flags |= BB_EXCEPTION_HANDLER;
6955 * Linking the try block with the EH block hinders inlining as we won't be able to
6956 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6958 if (COMPILE_LLVM (cfg))
6959 link_bblock (cfg, try_bb, tblock);
6961 if (*(ip + clause->handler_offset) == CEE_POP)
6962 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6964 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6965 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6966 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6967 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6968 MONO_ADD_INS (tblock, ins);
6970 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6971 /* finally clauses already have a seq point */
6972 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6973 MONO_ADD_INS (tblock, ins);
6976 /* todo: is a fault block unsafe to optimize? */
6977 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6978 tblock->flags |= BB_EXCEPTION_UNSAFE;
6982 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6984 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6986 /* catch and filter blocks get the exception object on the stack */
6987 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6988 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6989 MonoInst *dummy_use;
6991 /* mostly like handle_stack_args (), but just sets the input args */
6992 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6993 tblock->in_scount = 1;
6994 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6995 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6998 * Add a dummy use for the exvar so its liveness info will be
7002 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7004 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7005 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7006 tblock->flags |= BB_EXCEPTION_HANDLER;
7007 tblock->real_offset = clause->data.filter_offset;
7008 tblock->in_scount = 1;
7009 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7010 /* The filter block shares the exvar with the handler block */
7011 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7012 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7013 MONO_ADD_INS (tblock, ins);
7017 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7018 clause->data.catch_class &&
7019 cfg->generic_sharing_context &&
7020 mono_class_check_context_used (clause->data.catch_class)) {
7022 * In shared generic code with catch
7023 * clauses containing type variables
7024 * the exception handling code has to
7025 * be able to get to the rgctx.
7026 * Therefore we have to make sure that
7027 * the vtable/mrgctx argument (for
7028 * static or generic methods) or the
7029 * "this" argument (for non-static
7030 * methods) are live.
7032 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7033 mini_method_get_context (method)->method_inst ||
7034 method->klass->valuetype) {
7035 mono_get_vtable_var (cfg);
7037 MonoInst *dummy_use;
7039 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7044 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7045 cfg->cbb = start_bblock;
7046 cfg->args = arg_array;
7047 mono_save_args (cfg, sig, inline_args);
7050 /* FIRST CODE BLOCK */
7051 NEW_BBLOCK (cfg, bblock);
7052 bblock->cil_code = ip;
7056 ADD_BBLOCK (cfg, bblock);
7058 if (cfg->method == method) {
7059 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7060 if (breakpoint_id) {
7061 MONO_INST_NEW (cfg, ins, OP_BREAK);
7062 MONO_ADD_INS (bblock, ins);
7066 if (mono_security_cas_enabled ())
7067 secman = mono_security_manager_get_methods ();
7069 security = (secman && mono_security_method_has_declsec (method));
7070 /* at this point having security doesn't mean we have any code to generate */
7071 if (security && (cfg->method == method)) {
7072 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7073 * And we do not want to enter the next section (with allocation) if we
7074 * have nothing to generate */
7075 security = mono_declsec_get_demands (method, &actions);
7078 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7079 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7081 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7082 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7083 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7085 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
7086 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7090 mono_custom_attrs_free (custom);
7093 custom = mono_custom_attrs_from_class (wrapped->klass);
7094 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7098 mono_custom_attrs_free (custom);
7101 /* not a P/Invoke after all */
7106 /* we use a separate basic block for the initialization code */
7107 NEW_BBLOCK (cfg, init_localsbb);
7108 cfg->bb_init = init_localsbb;
7109 init_localsbb->real_offset = cfg->real_offset;
7110 start_bblock->next_bb = init_localsbb;
7111 init_localsbb->next_bb = bblock;
7112 link_bblock (cfg, start_bblock, init_localsbb);
7113 link_bblock (cfg, init_localsbb, bblock);
7115 cfg->cbb = init_localsbb;
7117 if (cfg->gsharedvt && cfg->method == method) {
7118 MonoGSharedVtMethodInfo *info;
7119 MonoInst *var, *locals_var;
7122 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7123 info->method = cfg->method;
7124 info->count_entries = 16;
7125 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7126 cfg->gsharedvt_info = info;
7128 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7129 /* prevent it from being register allocated */
7130 //var->flags |= MONO_INST_VOLATILE;
7131 cfg->gsharedvt_info_var = var;
7133 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7134 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7136 /* Allocate locals */
7137 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7138 /* prevent it from being register allocated */
7139 //locals_var->flags |= MONO_INST_VOLATILE;
7140 cfg->gsharedvt_locals_var = locals_var;
7142 dreg = alloc_ireg (cfg);
7143 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7145 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7146 ins->dreg = locals_var->dreg;
7148 MONO_ADD_INS (cfg->cbb, ins);
7149 cfg->gsharedvt_locals_var_ins = ins;
7151 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7154 ins->flags |= MONO_INST_INIT;
7158 /* at this point we know, if security is TRUE, that some code needs to be generated */
7159 if (security && (cfg->method == method)) {
7162 cfg->stat_cas_demand_generation++;
7164 if (actions.demand.blob) {
7165 /* Add code for SecurityAction.Demand */
7166 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7167 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7168 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7169 mono_emit_method_call (cfg, secman->demand, args, NULL);
7171 if (actions.noncasdemand.blob) {
7172 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7173 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7174 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7175 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7176 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7177 mono_emit_method_call (cfg, secman->demand, args, NULL);
7179 if (actions.demandchoice.blob) {
7180 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7181 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7182 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7183 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7184 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7188 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7190 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7193 if (mono_security_core_clr_enabled ()) {
7194 /* check if this is native code, e.g. an icall or a p/invoke */
7195 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7196 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7198 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7199 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7201 /* if this is a native call then it can only be JITted from platform code */
7202 if ((icall || pinvk) && method->klass && method->klass->image) {
7203 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7204 MonoException *ex = icall ? mono_get_exception_security () :
7205 mono_get_exception_method_access ();
7206 emit_throw_exception (cfg, ex);
7213 CHECK_CFG_EXCEPTION;
7215 if (header->code_size == 0)
7218 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7223 if (cfg->method == method)
7224 mono_debug_init_method (cfg, bblock, breakpoint_id);
7226 for (n = 0; n < header->num_locals; ++n) {
7227 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7232 /* We force the vtable variable here for all shared methods
7233 for the possibility that they might show up in a stack
7234 trace where their exact instantiation is needed. */
7235 if (cfg->generic_sharing_context && method == cfg->method) {
7236 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7237 mini_method_get_context (method)->method_inst ||
7238 method->klass->valuetype) {
7239 mono_get_vtable_var (cfg);
7241 /* FIXME: Is there a better way to do this?
7242 We need the variable live for the duration
7243 of the whole method. */
7244 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7248 /* add a check for this != NULL to inlined methods */
7249 if (is_virtual_call) {
7252 NEW_ARGLOAD (cfg, arg_ins, 0);
7253 MONO_ADD_INS (cfg->cbb, arg_ins);
7254 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7257 skip_dead_blocks = !dont_verify;
7258 if (skip_dead_blocks) {
7259 original_bb = bb = mono_basic_block_split (method, &error);
7260 if (!mono_error_ok (&error)) {
7261 mono_error_cleanup (&error);
7267 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7268 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7271 start_new_bblock = 0;
7274 if (cfg->method == method)
7275 cfg->real_offset = ip - header->code;
7277 cfg->real_offset = inline_offset;
7282 if (start_new_bblock) {
7283 bblock->cil_length = ip - bblock->cil_code;
7284 if (start_new_bblock == 2) {
7285 g_assert (ip == tblock->cil_code);
7287 GET_BBLOCK (cfg, tblock, ip);
7289 bblock->next_bb = tblock;
7292 start_new_bblock = 0;
7293 for (i = 0; i < bblock->in_scount; ++i) {
7294 if (cfg->verbose_level > 3)
7295 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7296 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7300 g_slist_free (class_inits);
7303 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7304 link_bblock (cfg, bblock, tblock);
7305 if (sp != stack_start) {
7306 handle_stack_args (cfg, stack_start, sp - stack_start);
7308 CHECK_UNVERIFIABLE (cfg);
7310 bblock->next_bb = tblock;
7313 for (i = 0; i < bblock->in_scount; ++i) {
7314 if (cfg->verbose_level > 3)
7315 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7316 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7319 g_slist_free (class_inits);
7324 if (skip_dead_blocks) {
7325 int ip_offset = ip - header->code;
7327 if (ip_offset == bb->end)
7331 int op_size = mono_opcode_size (ip, end);
7332 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7334 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7336 if (ip_offset + op_size == bb->end) {
7337 MONO_INST_NEW (cfg, ins, OP_NOP);
7338 MONO_ADD_INS (bblock, ins);
7339 start_new_bblock = 1;
7347 * Sequence points are points where the debugger can place a breakpoint.
7348 * Currently, we generate these automatically at points where the IL
7351 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7353 * Make methods interruptable at the beginning, and at the targets of
7354 * backward branches.
7355 * Also, do this at the start of every bblock in methods with clauses too,
7356 * to be able to handle instructions with imprecise control flow like
7358 * Backward branches are handled at the end of method-to-ir ().
7360 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7362 /* Avoid sequence points on empty IL like .volatile */
7363 // FIXME: Enable this
7364 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7365 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7366 if (sp != stack_start)
7367 ins->flags |= MONO_INST_NONEMPTY_STACK;
7368 MONO_ADD_INS (cfg->cbb, ins);
7371 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7374 bblock->real_offset = cfg->real_offset;
7376 if ((cfg->method == method) && cfg->coverage_info) {
7377 guint32 cil_offset = ip - header->code;
7378 cfg->coverage_info->data [cil_offset].cil_code = ip;
7380 /* TODO: Use an increment here */
7381 #if defined(TARGET_X86)
7382 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7383 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7385 MONO_ADD_INS (cfg->cbb, ins);
7387 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7388 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7392 if (cfg->verbose_level > 3)
7393 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7397 if (seq_points && !sym_seq_points && sp != stack_start) {
7399 * The C# compiler uses these nops to notify the JIT that it should
7400 * insert seq points.
7402 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7403 MONO_ADD_INS (cfg->cbb, ins);
7405 if (cfg->keep_cil_nops)
7406 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7408 MONO_INST_NEW (cfg, ins, OP_NOP);
7410 MONO_ADD_INS (bblock, ins);
7413 if (should_insert_brekpoint (cfg->method)) {
7414 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7416 MONO_INST_NEW (cfg, ins, OP_NOP);
7419 MONO_ADD_INS (bblock, ins);
7425 CHECK_STACK_OVF (1);
7426 n = (*ip)-CEE_LDARG_0;
7428 EMIT_NEW_ARGLOAD (cfg, ins, n);
7436 CHECK_STACK_OVF (1);
7437 n = (*ip)-CEE_LDLOC_0;
7439 EMIT_NEW_LOCLOAD (cfg, ins, n);
7448 n = (*ip)-CEE_STLOC_0;
7451 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7453 emit_stloc_ir (cfg, sp, header, n);
7460 CHECK_STACK_OVF (1);
7463 EMIT_NEW_ARGLOAD (cfg, ins, n);
7469 CHECK_STACK_OVF (1);
7472 NEW_ARGLOADA (cfg, ins, n);
7473 MONO_ADD_INS (cfg->cbb, ins);
7483 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7485 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7490 CHECK_STACK_OVF (1);
7493 EMIT_NEW_LOCLOAD (cfg, ins, n);
7497 case CEE_LDLOCA_S: {
7498 unsigned char *tmp_ip;
7500 CHECK_STACK_OVF (1);
7501 CHECK_LOCAL (ip [1]);
7503 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7509 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7518 CHECK_LOCAL (ip [1]);
7519 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7521 emit_stloc_ir (cfg, sp, header, ip [1]);
7526 CHECK_STACK_OVF (1);
7527 EMIT_NEW_PCONST (cfg, ins, NULL);
7528 ins->type = STACK_OBJ;
7533 CHECK_STACK_OVF (1);
7534 EMIT_NEW_ICONST (cfg, ins, -1);
7547 CHECK_STACK_OVF (1);
7548 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7554 CHECK_STACK_OVF (1);
7556 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7562 CHECK_STACK_OVF (1);
7563 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7569 CHECK_STACK_OVF (1);
7570 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7571 ins->type = STACK_I8;
7572 ins->dreg = alloc_dreg (cfg, STACK_I8);
7574 ins->inst_l = (gint64)read64 (ip);
7575 MONO_ADD_INS (bblock, ins);
7581 gboolean use_aotconst = FALSE;
7583 #ifdef TARGET_POWERPC
7584 /* FIXME: Clean this up */
7585 if (cfg->compile_aot)
7586 use_aotconst = TRUE;
7589 /* FIXME: we should really allocate this only late in the compilation process */
7590 f = mono_domain_alloc (cfg->domain, sizeof (float));
7592 CHECK_STACK_OVF (1);
7598 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7600 dreg = alloc_freg (cfg);
7601 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7602 ins->type = STACK_R8;
7604 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7605 ins->type = STACK_R8;
7606 ins->dreg = alloc_dreg (cfg, STACK_R8);
7608 MONO_ADD_INS (bblock, ins);
7618 gboolean use_aotconst = FALSE;
7620 #ifdef TARGET_POWERPC
7621 /* FIXME: Clean this up */
7622 if (cfg->compile_aot)
7623 use_aotconst = TRUE;
7626 /* FIXME: we should really allocate this only late in the compilation process */
7627 d = mono_domain_alloc (cfg->domain, sizeof (double));
7629 CHECK_STACK_OVF (1);
7635 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7637 dreg = alloc_freg (cfg);
7638 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7639 ins->type = STACK_R8;
7641 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7642 ins->type = STACK_R8;
7643 ins->dreg = alloc_dreg (cfg, STACK_R8);
7645 MONO_ADD_INS (bblock, ins);
7654 MonoInst *temp, *store;
7656 CHECK_STACK_OVF (1);
7660 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7661 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7663 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7666 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7679 if (sp [0]->type == STACK_R8)
7680 /* we need to pop the value from the x86 FP stack */
7681 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7687 INLINE_FAILURE ("jmp");
7688 GSHAREDVT_FAILURE (*ip);
7691 if (stack_start != sp)
7693 token = read32 (ip + 1);
7694 /* FIXME: check the signature matches */
7695 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7697 if (!cmethod || mono_loader_get_last_error ())
7700 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7701 GENERIC_SHARING_FAILURE (CEE_JMP);
7703 if (mono_security_cas_enabled ())
7704 CHECK_CFG_EXCEPTION;
7706 if (ARCH_HAVE_OP_TAIL_CALL) {
7707 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7710 /* Handle tail calls similarly to calls */
7711 n = fsig->param_count + fsig->hasthis;
7715 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7716 call->method = cmethod;
7717 call->tail_call = TRUE;
7718 call->signature = mono_method_signature (cmethod);
7719 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7720 call->inst.inst_p0 = cmethod;
7721 for (i = 0; i < n; ++i)
7722 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7724 mono_arch_emit_call (cfg, call);
7725 MONO_ADD_INS (bblock, (MonoInst*)call);
7727 for (i = 0; i < num_args; ++i)
7728 /* Prevent arguments from being optimized away */
7729 arg_array [i]->flags |= MONO_INST_VOLATILE;
7731 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7732 ins = (MonoInst*)call;
7733 ins->inst_p0 = cmethod;
7734 MONO_ADD_INS (bblock, ins);
7738 start_new_bblock = 1;
7743 case CEE_CALLVIRT: {
7744 MonoInst *addr = NULL;
7745 MonoMethodSignature *fsig = NULL;
7747 int virtual = *ip == CEE_CALLVIRT;
7748 int calli = *ip == CEE_CALLI;
7749 gboolean pass_imt_from_rgctx = FALSE;
7750 MonoInst *imt_arg = NULL;
7751 MonoInst *keep_this_alive = NULL;
7752 gboolean pass_vtable = FALSE;
7753 gboolean pass_mrgctx = FALSE;
7754 MonoInst *vtable_arg = NULL;
7755 gboolean check_this = FALSE;
7756 gboolean supported_tail_call = FALSE;
7757 gboolean tail_call = FALSE;
7758 gboolean need_seq_point = FALSE;
7759 guint32 call_opcode = *ip;
7760 gboolean emit_widen = TRUE;
7761 gboolean push_res = TRUE;
7762 gboolean skip_ret = FALSE;
7763 gboolean delegate_invoke = FALSE;
7766 token = read32 (ip + 1);
7771 //GSHAREDVT_FAILURE (*ip);
7776 fsig = mini_get_signature (method, token, generic_context);
7777 n = fsig->param_count + fsig->hasthis;
7779 if (method->dynamic && fsig->pinvoke) {
7783 * This is a call through a function pointer using a pinvoke
7784 * signature. Have to create a wrapper and call that instead.
7785 * FIXME: This is very slow, need to create a wrapper at JIT time
7786 * instead based on the signature.
7788 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7789 EMIT_NEW_PCONST (cfg, args [1], fsig);
7791 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7794 MonoMethod *cil_method;
7796 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7797 cil_method = cmethod;
7799 if (constrained_call) {
7800 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7801 if (cfg->verbose_level > 2)
7802 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7803 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7804 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7805 cfg->generic_sharing_context)) {
7806 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7809 if (cfg->verbose_level > 2)
7810 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7812 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7814 * This is needed since get_method_constrained can't find
7815 * the method in klass representing a type var.
7816 * The type var is guaranteed to be a reference type in this
7819 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7820 g_assert (!cmethod->klass->valuetype);
7822 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7827 if (!cmethod || mono_loader_get_last_error ())
7829 if (!dont_verify && !cfg->skip_visibility) {
7830 MonoMethod *target_method = cil_method;
7831 if (method->is_inflated) {
7832 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7834 if (!mono_method_can_access_method (method_definition, target_method) &&
7835 !mono_method_can_access_method (method, cil_method))
7836 METHOD_ACCESS_FAILURE;
7839 if (mono_security_core_clr_enabled ())
7840 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7842 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7843 /* MS.NET seems to silently convert this to a callvirt */
7848 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7849 * converts to a callvirt.
7851 * tests/bug-515884.il is an example of this behavior
7853 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7854 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7855 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7859 if (!cmethod->klass->inited)
7860 if (!mono_class_init (cmethod->klass))
7861 TYPE_LOAD_ERROR (cmethod->klass);
7863 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7864 mini_class_is_system_array (cmethod->klass)) {
7865 array_rank = cmethod->klass->rank;
7866 fsig = mono_method_signature (cmethod);
7868 fsig = mono_method_signature (cmethod);
7873 if (fsig->pinvoke) {
7874 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7875 check_for_pending_exc, cfg->compile_aot);
7876 fsig = mono_method_signature (wrapper);
7877 } else if (constrained_call) {
7878 fsig = mono_method_signature (cmethod);
7880 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7884 mono_save_token_info (cfg, image, token, cil_method);
7886 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7888 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7889 * foo (bar (), baz ())
7890 * works correctly. MS does this also:
7891 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7892 * The problem with this approach is that the debugger will stop after all calls returning a value,
7893 * even for simple cases, like:
7896 /* Special case a few common successor opcodes */
7897 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7898 need_seq_point = TRUE;
7901 n = fsig->param_count + fsig->hasthis;
7903 /* Don't support calls made using type arguments for now */
7905 if (cfg->gsharedvt) {
7906 if (mini_is_gsharedvt_signature (cfg, fsig))
7907 GSHAREDVT_FAILURE (*ip);
7911 if (mono_security_cas_enabled ()) {
7912 if (check_linkdemand (cfg, method, cmethod))
7913 INLINE_FAILURE ("linkdemand");
7914 CHECK_CFG_EXCEPTION;
7917 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7918 g_assert_not_reached ();
7921 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7924 if (!cfg->generic_sharing_context && cmethod)
7925 g_assert (!mono_method_check_context_used (cmethod));
7929 //g_assert (!virtual || fsig->hasthis);
7933 if (constrained_call) {
7934 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7936 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7938 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7939 /* The 'Own method' case below */
7940 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7941 /* 'The type parameter is instantiated as a reference type' case below. */
7942 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7943 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7944 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7945 MonoInst *args [16];
7948 * This case handles calls to
7949 * - object:ToString()/Equals()/GetHashCode(),
7950 * - System.IComparable<T>:CompareTo()
7951 * - System.IEquatable<T>:Equals ()
7952 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7956 if (mono_method_check_context_used (cmethod))
7957 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7959 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7960 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7962 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7963 if (fsig->hasthis && fsig->param_count) {
7964 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7965 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7966 ins->dreg = alloc_preg (cfg);
7967 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7968 MONO_ADD_INS (cfg->cbb, ins);
7971 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7974 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7976 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7977 addr_reg = ins->dreg;
7978 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7980 EMIT_NEW_ICONST (cfg, args [3], 0);
7981 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7984 EMIT_NEW_ICONST (cfg, args [3], 0);
7985 EMIT_NEW_ICONST (cfg, args [4], 0);
7987 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7990 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7991 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
7992 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
7996 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7997 MONO_ADD_INS (cfg->cbb, add);
7999 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8000 MONO_ADD_INS (cfg->cbb, ins);
8001 /* ins represents the call result */
8006 GSHAREDVT_FAILURE (*ip);
8010 * We have the `constrained.' prefix opcode.
8012 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8014 * The type parameter is instantiated as a valuetype,
8015 * but that type doesn't override the method we're
8016 * calling, so we need to box `this'.
8018 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8019 ins->klass = constrained_call;
8020 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8021 CHECK_CFG_EXCEPTION;
8022 } else if (!constrained_call->valuetype) {
8023 int dreg = alloc_ireg_ref (cfg);
8026 * The type parameter is instantiated as a reference
8027 * type. We have a managed pointer on the stack, so
8028 * we need to dereference it here.
8030 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8031 ins->type = STACK_OBJ;
8034 if (cmethod->klass->valuetype) {
8037 /* Interface method */
8040 mono_class_setup_vtable (constrained_call);
8041 CHECK_TYPELOAD (constrained_call);
8042 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8044 TYPE_LOAD_ERROR (constrained_call);
8045 slot = mono_method_get_vtable_slot (cmethod);
8047 TYPE_LOAD_ERROR (cmethod->klass);
8048 cmethod = constrained_call->vtable [ioffset + slot];
8050 if (cmethod->klass == mono_defaults.enum_class) {
8051 /* Enum implements some interfaces, so treat this as the first case */
8052 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8053 ins->klass = constrained_call;
8054 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8055 CHECK_CFG_EXCEPTION;
8060 constrained_call = NULL;
8063 if (!calli && check_call_signature (cfg, fsig, sp))
8066 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8067 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8068 delegate_invoke = TRUE;
8071 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8073 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8074 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8082 * If the callee is a shared method, then its static cctor
8083 * might not get called after the call was patched.
8085 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8086 emit_generic_class_init (cfg, cmethod->klass);
8087 CHECK_TYPELOAD (cmethod->klass);
8091 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8093 if (cfg->generic_sharing_context && cmethod) {
8094 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8096 context_used = mini_method_check_context_used (cfg, cmethod);
8098 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8099 /* Generic method interface
8100 calls are resolved via a
8101 helper function and don't
8103 if (!cmethod_context || !cmethod_context->method_inst)
8104 pass_imt_from_rgctx = TRUE;
8108 * If a shared method calls another
8109 * shared method then the caller must
8110 * have a generic sharing context
8111 * because the magic trampoline
8112 * requires it. FIXME: We shouldn't
8113 * have to force the vtable/mrgctx
8114 * variable here. Instead there
8115 * should be a flag in the cfg to
8116 * request a generic sharing context.
8119 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8120 mono_get_vtable_var (cfg);
8125 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8127 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8129 CHECK_TYPELOAD (cmethod->klass);
8130 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8135 g_assert (!vtable_arg);
8137 if (!cfg->compile_aot) {
8139 * emit_get_rgctx_method () calls mono_class_vtable () so check
8140 * for type load errors before.
8142 mono_class_setup_vtable (cmethod->klass);
8143 CHECK_TYPELOAD (cmethod->klass);
8146 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8148 /* !marshalbyref is needed to properly handle generic methods + remoting */
8149 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8150 MONO_METHOD_IS_FINAL (cmethod)) &&
8151 !mono_class_is_marshalbyref (cmethod->klass)) {
8158 if (pass_imt_from_rgctx) {
8159 g_assert (!pass_vtable);
8162 imt_arg = emit_get_rgctx_method (cfg, context_used,
8163 cmethod, MONO_RGCTX_INFO_METHOD);
8167 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8169 /* Calling virtual generic methods */
8170 if (cmethod && virtual &&
8171 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8172 !(MONO_METHOD_IS_FINAL (cmethod) &&
8173 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8174 fsig->generic_param_count &&
8175 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8176 MonoInst *this_temp, *this_arg_temp, *store;
8177 MonoInst *iargs [4];
8178 gboolean use_imt = FALSE;
8180 g_assert (fsig->is_inflated);
8182 /* Prevent inlining of methods that contain indirect calls */
8183 INLINE_FAILURE ("virtual generic call");
8185 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8186 GSHAREDVT_FAILURE (*ip);
8188 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8189 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8194 g_assert (!imt_arg);
8196 g_assert (cmethod->is_inflated);
8197 imt_arg = emit_get_rgctx_method (cfg, context_used,
8198 cmethod, MONO_RGCTX_INFO_METHOD);
8199 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8201 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8202 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8203 MONO_ADD_INS (bblock, store);
8205 /* FIXME: This should be a managed pointer */
8206 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8208 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8209 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8210 cmethod, MONO_RGCTX_INFO_METHOD);
8211 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8212 addr = mono_emit_jit_icall (cfg,
8213 mono_helper_compile_generic_method, iargs);
8215 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8217 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8224 * Implement a workaround for the inherent races involved in locking:
8230 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8231 * try block, the Exit () won't be executed, see:
8232 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8233 * To work around this, we extend such try blocks to include the last x bytes
8234 * of the Monitor.Enter () call.
8236 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8237 MonoBasicBlock *tbb;
8239 GET_BBLOCK (cfg, tbb, ip + 5);
8241 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8242 * from Monitor.Enter like ArgumentNullException.
8244 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8245 /* Mark this bblock as needing to be extended */
8246 tbb->extend_try_block = TRUE;
8250 /* Conversion to a JIT intrinsic */
8251 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8253 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8254 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8261 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8262 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8263 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8264 !g_list_find (dont_inline, cmethod)) {
8266 gboolean always = FALSE;
8268 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8269 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8270 /* Prevent inlining of methods that call wrappers */
8271 INLINE_FAILURE ("wrapper call");
8272 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8276 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8278 cfg->real_offset += 5;
8281 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8282 /* *sp is already set by inline_method */
8287 inline_costs += costs;
8293 /* Tail recursion elimination */
8294 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8295 gboolean has_vtargs = FALSE;
8298 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8299 INLINE_FAILURE ("tail call");
8301 /* keep it simple */
8302 for (i = fsig->param_count - 1; i >= 0; i--) {
8303 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8308 for (i = 0; i < n; ++i)
8309 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8310 MONO_INST_NEW (cfg, ins, OP_BR);
8311 MONO_ADD_INS (bblock, ins);
8312 tblock = start_bblock->out_bb [0];
8313 link_bblock (cfg, bblock, tblock);
8314 ins->inst_target_bb = tblock;
8315 start_new_bblock = 1;
8317 /* skip the CEE_RET, too */
8318 if (ip_in_bb (cfg, bblock, ip + 5))
8325 inline_costs += 10 * num_calls++;
8328 * Making generic calls out of gsharedvt methods.
8330 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8331 MonoRgctxInfoType info_type;
8334 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8335 //GSHAREDVT_FAILURE (*ip);
8336 // disable for possible remoting calls
8337 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8338 GSHAREDVT_FAILURE (*ip);
8339 if (fsig->generic_param_count) {
8340 /* virtual generic call */
8341 g_assert (mono_use_imt);
8342 g_assert (!imt_arg);
8343 /* Same as the virtual generic case above */
8344 imt_arg = emit_get_rgctx_method (cfg, context_used,
8345 cmethod, MONO_RGCTX_INFO_METHOD);
8346 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8351 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8352 /* test_0_multi_dim_arrays () in gshared.cs */
8353 GSHAREDVT_FAILURE (*ip);
8355 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8356 keep_this_alive = sp [0];
8358 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8359 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8361 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8362 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8364 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8366 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8368 * We pass the address to the gsharedvt trampoline in the rgctx reg
8370 MonoInst *callee = addr;
8372 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8374 GSHAREDVT_FAILURE (*ip);
8376 addr = emit_get_rgctx_sig (cfg, context_used,
8377 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8378 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8382 /* Generic sharing */
8383 /* FIXME: only do this for generic methods if
8384 they are not shared! */
8385 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8386 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8387 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8388 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8389 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8390 INLINE_FAILURE ("gshared");
8392 g_assert (cfg->generic_sharing_context && cmethod);
8396 * We are compiling a call to a
8397 * generic method from shared code,
8398 * which means that we have to look up
8399 * the method in the rgctx and do an
8403 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8405 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8406 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8410 /* Indirect calls */
8412 if (call_opcode == CEE_CALL)
8413 g_assert (context_used);
8414 else if (call_opcode == CEE_CALLI)
8415 g_assert (!vtable_arg);
8417 /* FIXME: what the hell is this??? */
8418 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8419 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8421 /* Prevent inlining of methods with indirect calls */
8422 INLINE_FAILURE ("indirect call");
8424 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8429 * Instead of emitting an indirect call, emit a direct call
8430 * with the contents of the aotconst as the patch info.
8432 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8433 info_type = addr->inst_c1;
8434 info_data = addr->inst_p0;
8436 info_type = addr->inst_right->inst_c1;
8437 info_data = addr->inst_right->inst_left;
8440 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8441 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8446 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8454 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8455 MonoInst *val = sp [fsig->param_count];
8457 if (val->type == STACK_OBJ) {
8458 MonoInst *iargs [2];
8463 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8466 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8467 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8468 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8469 emit_write_barrier (cfg, addr, val);
8470 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8471 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8473 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8474 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8475 if (!cmethod->klass->element_class->valuetype && !readonly)
8476 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8477 CHECK_TYPELOAD (cmethod->klass);
8480 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8483 g_assert_not_reached ();
8490 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8494 /* Tail prefix / tail call optimization */
8496 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8497 /* FIXME: runtime generic context pointer for jumps? */
8498 /* FIXME: handle this for generic sharing eventually */
8499 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8500 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8501 supported_tail_call = TRUE;
8503 if (supported_tail_call) {
8506 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8507 INLINE_FAILURE ("tail call");
8509 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8511 if (ARCH_HAVE_OP_TAIL_CALL) {
8512 /* Handle tail calls similarly to normal calls */
8515 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8516 call->tail_call = TRUE;
8517 call->method = cmethod;
8518 call->signature = mono_method_signature (cmethod);
8521 * We implement tail calls by storing the actual arguments into the
8522 * argument variables, then emitting a CEE_JMP.
8524 for (i = 0; i < n; ++i) {
8525 /* Prevent argument from being register allocated */
8526 arg_array [i]->flags |= MONO_INST_VOLATILE;
8527 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8529 ins = (MonoInst*)call;
8530 ins->inst_p0 = cmethod;
8531 ins->inst_p1 = arg_array [0];
8532 MONO_ADD_INS (bblock, ins);
8533 link_bblock (cfg, bblock, end_bblock);
8534 start_new_bblock = 1;
8536 // FIXME: Eliminate unreachable epilogs
8539 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8540 * only reachable from this call.
8542 GET_BBLOCK (cfg, tblock, ip + 5);
8543 if (tblock == bblock || tblock->in_count == 0)
8552 * Synchronized wrappers.
8553 * It's hard to determine where to replace a method with its synchronized
8554 * wrapper without causing an infinite recursion. The current solution is
8555 * to add the synchronized wrapper in the trampolines, and to
8556 * change the called method to a dummy wrapper, and resolve that wrapper
8557 * to the real method in mono_jit_compile_method ().
8559 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8560 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8561 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8562 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8566 INLINE_FAILURE ("call");
8567 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8568 imt_arg, vtable_arg);
8571 link_bblock (cfg, bblock, end_bblock);
8572 start_new_bblock = 1;
8574 // FIXME: Eliminate unreachable epilogs
8577 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8578 * only reachable from this call.
8580 GET_BBLOCK (cfg, tblock, ip + 5);
8581 if (tblock == bblock || tblock->in_count == 0)
8588 /* End of call, INS should contain the result of the call, if any */
8590 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8593 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8598 if (keep_this_alive) {
8599 MonoInst *dummy_use;
8601 /* See mono_emit_method_call_full () */
8602 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8605 CHECK_CFG_EXCEPTION;
8609 g_assert (*ip == CEE_RET);
8613 constrained_call = NULL;
8615 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8619 if (cfg->method != method) {
8620 /* return from inlined method */
8622 * If in_count == 0, that means the ret is unreachable due to
8623 * being preceded by a throw. In that case, inline_method () will
8624 * handle setting the return value
8625 * (test case: test_0_inline_throw ()).
8627 if (return_var && cfg->cbb->in_count) {
8628 MonoType *ret_type = mono_method_signature (method)->ret;
8634 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8637 //g_assert (returnvar != -1);
8638 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8639 cfg->ret_var_set = TRUE;
8642 if (cfg->lmf_var && cfg->cbb->in_count)
8646 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8648 if (seq_points && !sym_seq_points) {
8650 * Place a seq point here too even though the IL stack is not
8651 * empty, so a step over on
8654 * will work correctly.
8656 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8657 MONO_ADD_INS (cfg->cbb, ins);
8660 g_assert (!return_var);
8664 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8667 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8670 if (!cfg->vret_addr) {
8673 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8675 EMIT_NEW_RETLOADA (cfg, ret_addr);
8677 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8678 ins->klass = mono_class_from_mono_type (ret_type);
8681 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8682 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8683 MonoInst *iargs [1];
8687 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8688 mono_arch_emit_setret (cfg, method, conv);
8690 mono_arch_emit_setret (cfg, method, *sp);
8693 mono_arch_emit_setret (cfg, method, *sp);
8698 if (sp != stack_start)
8700 MONO_INST_NEW (cfg, ins, OP_BR);
8702 ins->inst_target_bb = end_bblock;
8703 MONO_ADD_INS (bblock, ins);
8704 link_bblock (cfg, bblock, end_bblock);
8705 start_new_bblock = 1;
8709 MONO_INST_NEW (cfg, ins, OP_BR);
8711 target = ip + 1 + (signed char)(*ip);
8713 GET_BBLOCK (cfg, tblock, target);
8714 link_bblock (cfg, bblock, tblock);
8715 ins->inst_target_bb = tblock;
8716 if (sp != stack_start) {
8717 handle_stack_args (cfg, stack_start, sp - stack_start);
8719 CHECK_UNVERIFIABLE (cfg);
8721 MONO_ADD_INS (bblock, ins);
8722 start_new_bblock = 1;
8723 inline_costs += BRANCH_COST;
8737 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8739 target = ip + 1 + *(signed char*)ip;
8745 inline_costs += BRANCH_COST;
8749 MONO_INST_NEW (cfg, ins, OP_BR);
8752 target = ip + 4 + (gint32)read32(ip);
8754 GET_BBLOCK (cfg, tblock, target);
8755 link_bblock (cfg, bblock, tblock);
8756 ins->inst_target_bb = tblock;
8757 if (sp != stack_start) {
8758 handle_stack_args (cfg, stack_start, sp - stack_start);
8760 CHECK_UNVERIFIABLE (cfg);
8763 MONO_ADD_INS (bblock, ins);
8765 start_new_bblock = 1;
8766 inline_costs += BRANCH_COST;
8773 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8774 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8775 guint32 opsize = is_short ? 1 : 4;
8777 CHECK_OPSIZE (opsize);
8779 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8782 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8787 GET_BBLOCK (cfg, tblock, target);
8788 link_bblock (cfg, bblock, tblock);
8789 GET_BBLOCK (cfg, tblock, ip);
8790 link_bblock (cfg, bblock, tblock);
8792 if (sp != stack_start) {
8793 handle_stack_args (cfg, stack_start, sp - stack_start);
8794 CHECK_UNVERIFIABLE (cfg);
8797 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8798 cmp->sreg1 = sp [0]->dreg;
8799 type_from_op (cmp, sp [0], NULL);
8802 #if SIZEOF_REGISTER == 4
8803 if (cmp->opcode == OP_LCOMPARE_IMM) {
8804 /* Convert it to OP_LCOMPARE */
8805 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8806 ins->type = STACK_I8;
8807 ins->dreg = alloc_dreg (cfg, STACK_I8);
8809 MONO_ADD_INS (bblock, ins);
8810 cmp->opcode = OP_LCOMPARE;
8811 cmp->sreg2 = ins->dreg;
8814 MONO_ADD_INS (bblock, cmp);
8816 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8817 type_from_op (ins, sp [0], NULL);
8818 MONO_ADD_INS (bblock, ins);
8819 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8820 GET_BBLOCK (cfg, tblock, target);
8821 ins->inst_true_bb = tblock;
8822 GET_BBLOCK (cfg, tblock, ip);
8823 ins->inst_false_bb = tblock;
8824 start_new_bblock = 2;
8827 inline_costs += BRANCH_COST;
8842 MONO_INST_NEW (cfg, ins, *ip);
8844 target = ip + 4 + (gint32)read32(ip);
8850 inline_costs += BRANCH_COST;
8854 MonoBasicBlock **targets;
8855 MonoBasicBlock *default_bblock;
8856 MonoJumpInfoBBTable *table;
8857 int offset_reg = alloc_preg (cfg);
8858 int target_reg = alloc_preg (cfg);
8859 int table_reg = alloc_preg (cfg);
8860 int sum_reg = alloc_preg (cfg);
8861 gboolean use_op_switch;
8865 n = read32 (ip + 1);
8868 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8872 CHECK_OPSIZE (n * sizeof (guint32));
8873 target = ip + n * sizeof (guint32);
8875 GET_BBLOCK (cfg, default_bblock, target);
8876 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8878 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8879 for (i = 0; i < n; ++i) {
8880 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8881 targets [i] = tblock;
8882 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8886 if (sp != stack_start) {
8888 * Link the current bb with the targets as well, so handle_stack_args
8889 * will set their in_stack correctly.
8891 link_bblock (cfg, bblock, default_bblock);
8892 for (i = 0; i < n; ++i)
8893 link_bblock (cfg, bblock, targets [i]);
8895 handle_stack_args (cfg, stack_start, sp - stack_start);
8897 CHECK_UNVERIFIABLE (cfg);
8900 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8901 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8904 for (i = 0; i < n; ++i)
8905 link_bblock (cfg, bblock, targets [i]);
8907 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8908 table->table = targets;
8909 table->table_size = n;
8911 use_op_switch = FALSE;
8913 /* ARM implements SWITCH statements differently */
8914 /* FIXME: Make it use the generic implementation */
8915 if (!cfg->compile_aot)
8916 use_op_switch = TRUE;
8919 if (COMPILE_LLVM (cfg))
8920 use_op_switch = TRUE;
8922 cfg->cbb->has_jump_table = 1;
8924 if (use_op_switch) {
8925 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8926 ins->sreg1 = src1->dreg;
8927 ins->inst_p0 = table;
8928 ins->inst_many_bb = targets;
8929 ins->klass = GUINT_TO_POINTER (n);
8930 MONO_ADD_INS (cfg->cbb, ins);
8932 if (sizeof (gpointer) == 8)
8933 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8937 #if SIZEOF_REGISTER == 8
8938 /* The upper word might not be zero, and we add it to a 64 bit address later */
8939 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8942 if (cfg->compile_aot) {
8943 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8945 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8946 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8947 ins->inst_p0 = table;
8948 ins->dreg = table_reg;
8949 MONO_ADD_INS (cfg->cbb, ins);
8952 /* FIXME: Use load_memindex */
8953 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8954 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8955 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8957 start_new_bblock = 1;
8958 inline_costs += (BRANCH_COST * 2);
8978 dreg = alloc_freg (cfg);
8981 dreg = alloc_lreg (cfg);
8984 dreg = alloc_ireg_ref (cfg);
8987 dreg = alloc_preg (cfg);
8990 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8991 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8992 ins->flags |= ins_flag;
8994 MONO_ADD_INS (bblock, ins);
8996 if (ins->flags & MONO_INST_VOLATILE) {
8997 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8998 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8999 emit_memory_barrier (cfg, FullBarrier);
9014 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9015 ins->flags |= ins_flag;
9018 if (ins->flags & MONO_INST_VOLATILE) {
9019 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9020 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
9021 emit_memory_barrier (cfg, FullBarrier);
9024 MONO_ADD_INS (bblock, ins);
9026 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9027 emit_write_barrier (cfg, sp [0], sp [1]);
9036 MONO_INST_NEW (cfg, ins, (*ip));
9038 ins->sreg1 = sp [0]->dreg;
9039 ins->sreg2 = sp [1]->dreg;
9040 type_from_op (ins, sp [0], sp [1]);
9042 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9044 /* Use the immediate opcodes if possible */
9045 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9046 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9047 if (imm_opcode != -1) {
9048 ins->opcode = imm_opcode;
9049 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9052 sp [1]->opcode = OP_NOP;
9056 MONO_ADD_INS ((cfg)->cbb, (ins));
9058 *sp++ = mono_decompose_opcode (cfg, ins);
9075 MONO_INST_NEW (cfg, ins, (*ip));
9077 ins->sreg1 = sp [0]->dreg;
9078 ins->sreg2 = sp [1]->dreg;
9079 type_from_op (ins, sp [0], sp [1]);
9081 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9082 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9084 /* FIXME: Pass opcode to is_inst_imm */
9086 /* Use the immediate opcodes if possible */
9087 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9090 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9091 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9092 /* Keep emulated opcodes which are optimized away later */
9093 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9094 imm_opcode = mono_op_to_op_imm (ins->opcode);
9097 if (imm_opcode != -1) {
9098 ins->opcode = imm_opcode;
9099 if (sp [1]->opcode == OP_I8CONST) {
9100 #if SIZEOF_REGISTER == 8
9101 ins->inst_imm = sp [1]->inst_l;
9103 ins->inst_ls_word = sp [1]->inst_ls_word;
9104 ins->inst_ms_word = sp [1]->inst_ms_word;
9108 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9111 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9112 if (sp [1]->next == NULL)
9113 sp [1]->opcode = OP_NOP;
9116 MONO_ADD_INS ((cfg)->cbb, (ins));
9118 *sp++ = mono_decompose_opcode (cfg, ins);
9131 case CEE_CONV_OVF_I8:
9132 case CEE_CONV_OVF_U8:
9136 /* Special case this earlier so we have long constants in the IR */
9137 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9138 int data = sp [-1]->inst_c0;
9139 sp [-1]->opcode = OP_I8CONST;
9140 sp [-1]->type = STACK_I8;
9141 #if SIZEOF_REGISTER == 8
9142 if ((*ip) == CEE_CONV_U8)
9143 sp [-1]->inst_c0 = (guint32)data;
9145 sp [-1]->inst_c0 = data;
9147 sp [-1]->inst_ls_word = data;
9148 if ((*ip) == CEE_CONV_U8)
9149 sp [-1]->inst_ms_word = 0;
9151 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9153 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9160 case CEE_CONV_OVF_I4:
9161 case CEE_CONV_OVF_I1:
9162 case CEE_CONV_OVF_I2:
9163 case CEE_CONV_OVF_I:
9164 case CEE_CONV_OVF_U:
9167 if (sp [-1]->type == STACK_R8) {
9168 ADD_UNOP (CEE_CONV_OVF_I8);
9175 case CEE_CONV_OVF_U1:
9176 case CEE_CONV_OVF_U2:
9177 case CEE_CONV_OVF_U4:
9180 if (sp [-1]->type == STACK_R8) {
9181 ADD_UNOP (CEE_CONV_OVF_U8);
9188 case CEE_CONV_OVF_I1_UN:
9189 case CEE_CONV_OVF_I2_UN:
9190 case CEE_CONV_OVF_I4_UN:
9191 case CEE_CONV_OVF_I8_UN:
9192 case CEE_CONV_OVF_U1_UN:
9193 case CEE_CONV_OVF_U2_UN:
9194 case CEE_CONV_OVF_U4_UN:
9195 case CEE_CONV_OVF_U8_UN:
9196 case CEE_CONV_OVF_I_UN:
9197 case CEE_CONV_OVF_U_UN:
9204 CHECK_CFG_EXCEPTION;
9208 case CEE_ADD_OVF_UN:
9210 case CEE_MUL_OVF_UN:
9212 case CEE_SUB_OVF_UN:
9218 GSHAREDVT_FAILURE (*ip);
9221 token = read32 (ip + 1);
9222 klass = mini_get_class (method, token, generic_context);
9223 CHECK_TYPELOAD (klass);
9225 if (generic_class_is_reference_type (cfg, klass)) {
9226 MonoInst *store, *load;
9227 int dreg = alloc_ireg_ref (cfg);
9229 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9230 load->flags |= ins_flag;
9231 MONO_ADD_INS (cfg->cbb, load);
9233 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9234 store->flags |= ins_flag;
9235 MONO_ADD_INS (cfg->cbb, store);
9237 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9238 emit_write_barrier (cfg, sp [0], sp [1]);
9240 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9252 token = read32 (ip + 1);
9253 klass = mini_get_class (method, token, generic_context);
9254 CHECK_TYPELOAD (klass);
9256 /* Optimize the common ldobj+stloc combination */
9266 loc_index = ip [5] - CEE_STLOC_0;
9273 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9274 CHECK_LOCAL (loc_index);
9276 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9277 ins->dreg = cfg->locals [loc_index]->dreg;
9283 /* Optimize the ldobj+stobj combination */
9284 /* The reference case ends up being a load+store anyway */
9285 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9290 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9297 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9306 CHECK_STACK_OVF (1);
9308 n = read32 (ip + 1);
9310 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9311 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9312 ins->type = STACK_OBJ;
9315 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9316 MonoInst *iargs [1];
9318 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9319 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9321 if (cfg->opt & MONO_OPT_SHARED) {
9322 MonoInst *iargs [3];
9324 if (cfg->compile_aot) {
9325 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9327 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9328 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9329 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9330 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9331 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9333 if (bblock->out_of_line) {
9334 MonoInst *iargs [2];
9336 if (image == mono_defaults.corlib) {
9338 * Avoid relocations in AOT and save some space by using a
9339 * version of helper_ldstr specialized to mscorlib.
9341 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9342 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9344 /* Avoid creating the string object */
9345 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9346 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9347 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9351 if (cfg->compile_aot) {
9352 NEW_LDSTRCONST (cfg, ins, image, n);
9354 MONO_ADD_INS (bblock, ins);
9357 NEW_PCONST (cfg, ins, NULL);
9358 ins->type = STACK_OBJ;
9359 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9361 OUT_OF_MEMORY_FAILURE;
9364 MONO_ADD_INS (bblock, ins);
9373 MonoInst *iargs [2];
9374 MonoMethodSignature *fsig;
9377 MonoInst *vtable_arg = NULL;
9380 token = read32 (ip + 1);
9381 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9382 if (!cmethod || mono_loader_get_last_error ())
9384 fsig = mono_method_get_signature (cmethod, image, token);
9388 mono_save_token_info (cfg, image, token, cmethod);
9390 if (!mono_class_init (cmethod->klass))
9391 TYPE_LOAD_ERROR (cmethod->klass);
9393 context_used = mini_method_check_context_used (cfg, cmethod);
9395 if (mono_security_cas_enabled ()) {
9396 if (check_linkdemand (cfg, method, cmethod))
9397 INLINE_FAILURE ("linkdemand");
9398 CHECK_CFG_EXCEPTION;
9399 } else if (mono_security_core_clr_enabled ()) {
9400 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9403 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9404 emit_generic_class_init (cfg, cmethod->klass);
9405 CHECK_TYPELOAD (cmethod->klass);
9409 if (cfg->gsharedvt) {
9410 if (mini_is_gsharedvt_variable_signature (sig))
9411 GSHAREDVT_FAILURE (*ip);
9415 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9416 mono_method_is_generic_sharable (cmethod, TRUE)) {
9417 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9418 mono_class_vtable (cfg->domain, cmethod->klass);
9419 CHECK_TYPELOAD (cmethod->klass);
9421 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9422 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9425 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9426 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9428 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9430 CHECK_TYPELOAD (cmethod->klass);
9431 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9436 n = fsig->param_count;
9440 * Generate smaller code for the common newobj <exception> instruction in
9441 * argument checking code.
9443 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9444 is_exception_class (cmethod->klass) && n <= 2 &&
9445 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9446 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9447 MonoInst *iargs [3];
9449 g_assert (!vtable_arg);
9453 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9456 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9460 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9465 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9468 g_assert_not_reached ();
9476 /* move the args to allow room for 'this' in the first position */
9482 /* check_call_signature () requires sp[0] to be set */
9483 this_ins.type = STACK_OBJ;
9485 if (check_call_signature (cfg, fsig, sp))
9490 if (mini_class_is_system_array (cmethod->klass)) {
9491 g_assert (!vtable_arg);
9493 *sp = emit_get_rgctx_method (cfg, context_used,
9494 cmethod, MONO_RGCTX_INFO_METHOD);
9496 /* Avoid varargs in the common case */
9497 if (fsig->param_count == 1)
9498 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9499 else if (fsig->param_count == 2)
9500 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9501 else if (fsig->param_count == 3)
9502 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9503 else if (fsig->param_count == 4)
9504 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9506 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9507 } else if (cmethod->string_ctor) {
9508 g_assert (!context_used);
9509 g_assert (!vtable_arg);
9510 /* we simply pass a null pointer */
9511 EMIT_NEW_PCONST (cfg, *sp, NULL);
9512 /* now call the string ctor */
9513 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9515 MonoInst* callvirt_this_arg = NULL;
9517 if (cmethod->klass->valuetype) {
9518 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9519 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9520 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9525 * The code generated by mini_emit_virtual_call () expects
9526 * iargs [0] to be a boxed instance, but luckily the vcall
9527 * will be transformed into a normal call there.
9529 } else if (context_used) {
9530 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9533 MonoVTable *vtable = NULL;
9535 if (!cfg->compile_aot)
9536 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9537 CHECK_TYPELOAD (cmethod->klass);
9540 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9541 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9542 * As a workaround, we call class cctors before allocating objects.
9544 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9545 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9546 if (cfg->verbose_level > 2)
9547 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9548 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9551 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9554 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9557 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9559 /* Now call the actual ctor */
9560 /* Avoid virtual calls to ctors if possible */
9561 if (mono_class_is_marshalbyref (cmethod->klass))
9562 callvirt_this_arg = sp [0];
9565 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9566 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9567 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9572 CHECK_CFG_EXCEPTION;
9573 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9574 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9575 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9576 !g_list_find (dont_inline, cmethod)) {
9579 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9580 cfg->real_offset += 5;
9583 inline_costs += costs - 5;
9585 INLINE_FAILURE ("inline failure");
9586 // FIXME-VT: Clean this up
9587 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9588 GSHAREDVT_FAILURE(*ip);
9589 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9591 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9594 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9595 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9596 } else if (context_used &&
9597 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9598 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9599 MonoInst *cmethod_addr;
9601 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9602 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9604 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9606 INLINE_FAILURE ("ctor call");
9607 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9608 callvirt_this_arg, NULL, vtable_arg);
9612 if (alloc == NULL) {
9614 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9615 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9629 token = read32 (ip + 1);
9630 klass = mini_get_class (method, token, generic_context);
9631 CHECK_TYPELOAD (klass);
9632 if (sp [0]->type != STACK_OBJ)
9635 context_used = mini_class_check_context_used (cfg, klass);
9637 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9644 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9647 if (cfg->compile_aot)
9648 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9650 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9652 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9654 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9657 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9658 MonoMethod *mono_castclass;
9659 MonoInst *iargs [1];
9662 mono_castclass = mono_marshal_get_castclass (klass);
9665 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9666 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9667 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9668 reset_cast_details (cfg);
9669 CHECK_CFG_EXCEPTION;
9670 g_assert (costs > 0);
9673 cfg->real_offset += 5;
9678 inline_costs += costs;
9681 ins = handle_castclass (cfg, klass, *sp, context_used);
9682 CHECK_CFG_EXCEPTION;
9692 token = read32 (ip + 1);
9693 klass = mini_get_class (method, token, generic_context);
9694 CHECK_TYPELOAD (klass);
9695 if (sp [0]->type != STACK_OBJ)
9698 context_used = mini_class_check_context_used (cfg, klass);
9700 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9701 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9708 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9711 if (cfg->compile_aot)
9712 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9714 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9716 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9719 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9720 MonoMethod *mono_isinst;
9721 MonoInst *iargs [1];
9724 mono_isinst = mono_marshal_get_isinst (klass);
9727 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9728 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9729 CHECK_CFG_EXCEPTION;
9730 g_assert (costs > 0);
9733 cfg->real_offset += 5;
9738 inline_costs += costs;
9741 ins = handle_isinst (cfg, klass, *sp, context_used);
9742 CHECK_CFG_EXCEPTION;
9749 case CEE_UNBOX_ANY: {
9753 token = read32 (ip + 1);
9754 klass = mini_get_class (method, token, generic_context);
9755 CHECK_TYPELOAD (klass);
9757 mono_save_token_info (cfg, image, token, klass);
9759 context_used = mini_class_check_context_used (cfg, klass);
9761 if (mini_is_gsharedvt_klass (cfg, klass)) {
9762 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9770 if (generic_class_is_reference_type (cfg, klass)) {
9771 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9772 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9779 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9782 /*FIXME AOT support*/
9783 if (cfg->compile_aot)
9784 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9786 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9788 /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
9789 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9792 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9793 MonoMethod *mono_castclass;
9794 MonoInst *iargs [1];
9797 mono_castclass = mono_marshal_get_castclass (klass);
9800 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9801 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9802 CHECK_CFG_EXCEPTION;
9803 g_assert (costs > 0);
9806 cfg->real_offset += 5;
9810 inline_costs += costs;
9812 ins = handle_castclass (cfg, klass, *sp, context_used);
9813 CHECK_CFG_EXCEPTION;
9821 if (mono_class_is_nullable (klass)) {
9822 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9829 ins = handle_unbox (cfg, klass, sp, context_used);
9835 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9848 token = read32 (ip + 1);
9849 klass = mini_get_class (method, token, generic_context);
9850 CHECK_TYPELOAD (klass);
9852 mono_save_token_info (cfg, image, token, klass);
9854 context_used = mini_class_check_context_used (cfg, klass);
9856 if (generic_class_is_reference_type (cfg, klass)) {
9862 if (klass == mono_defaults.void_class)
9864 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9866 /* frequent check in generic code: box (struct), brtrue */
9868 // FIXME: LLVM can't handle the inconsistent bb linking
9869 if (!mono_class_is_nullable (klass) &&
9870 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9871 (ip [5] == CEE_BRTRUE ||
9872 ip [5] == CEE_BRTRUE_S ||
9873 ip [5] == CEE_BRFALSE ||
9874 ip [5] == CEE_BRFALSE_S)) {
9875 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9877 MonoBasicBlock *true_bb, *false_bb;
9881 if (cfg->verbose_level > 3) {
9882 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9883 printf ("<box+brtrue opt>\n");
9891 target = ip + 1 + (signed char)(*ip);
9898 target = ip + 4 + (gint)(read32 (ip));
9902 g_assert_not_reached ();
9906 * We need to link both bblocks, since it is needed for handling stack
9907 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9908 * Branching to only one of them would lead to inconsistencies, so
9909 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9911 GET_BBLOCK (cfg, true_bb, target);
9912 GET_BBLOCK (cfg, false_bb, ip);
9914 mono_link_bblock (cfg, cfg->cbb, true_bb);
9915 mono_link_bblock (cfg, cfg->cbb, false_bb);
9917 if (sp != stack_start) {
9918 handle_stack_args (cfg, stack_start, sp - stack_start);
9920 CHECK_UNVERIFIABLE (cfg);
9923 if (COMPILE_LLVM (cfg)) {
9924 dreg = alloc_ireg (cfg);
9925 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9928 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9930 /* The JIT can't eliminate the iconst+compare */
9931 MONO_INST_NEW (cfg, ins, OP_BR);
9932 ins->inst_target_bb = is_true ? true_bb : false_bb;
9933 MONO_ADD_INS (cfg->cbb, ins);
9936 start_new_bblock = 1;
9940 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9942 CHECK_CFG_EXCEPTION;
9951 token = read32 (ip + 1);
9952 klass = mini_get_class (method, token, generic_context);
9953 CHECK_TYPELOAD (klass);
9955 mono_save_token_info (cfg, image, token, klass);
9957 context_used = mini_class_check_context_used (cfg, klass);
9959 if (mono_class_is_nullable (klass)) {
9962 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9963 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9967 ins = handle_unbox (cfg, klass, sp, context_used);
9980 MonoClassField *field;
9981 #ifndef DISABLE_REMOTING
9985 gboolean is_instance;
9987 gpointer addr = NULL;
9988 gboolean is_special_static;
9990 MonoInst *store_val = NULL;
9991 MonoInst *thread_ins;
9994 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9996 if (op == CEE_STFLD) {
10004 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10006 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10009 if (op == CEE_STSFLD) {
10012 store_val = sp [0];
10017 token = read32 (ip + 1);
10018 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10019 field = mono_method_get_wrapper_data (method, token);
10020 klass = field->parent;
10023 field = mono_field_from_token (image, token, &klass, generic_context);
10027 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10028 FIELD_ACCESS_FAILURE;
10029 mono_class_init (klass);
10031 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10034 /* if the class is Critical then transparent code cannot access its fields */
10035 if (!is_instance && mono_security_core_clr_enabled ())
10036 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10038 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10039 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10040 if (mono_security_core_clr_enabled ())
10041 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10045 * LDFLD etc. is usable on static fields as well, so convert those cases to
10048 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10060 g_assert_not_reached ();
10062 is_instance = FALSE;
10065 context_used = mini_class_check_context_used (cfg, klass);
10067 /* INSTANCE CASE */
10069 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10070 if (op == CEE_STFLD) {
10071 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10073 #ifndef DISABLE_REMOTING
10074 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10075 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10076 MonoInst *iargs [5];
10078 GSHAREDVT_FAILURE (op);
10080 iargs [0] = sp [0];
10081 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10082 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10083 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10085 iargs [4] = sp [1];
10087 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10088 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10089 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10090 CHECK_CFG_EXCEPTION;
10091 g_assert (costs > 0);
10093 cfg->real_offset += 5;
10096 inline_costs += costs;
10098 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10105 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10107 if (mini_is_gsharedvt_klass (cfg, klass)) {
10108 MonoInst *offset_ins;
10110 context_used = mini_class_check_context_used (cfg, klass);
10112 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10113 dreg = alloc_ireg_mp (cfg);
10114 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10115 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10116 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10118 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10120 if (sp [0]->opcode != OP_LDADDR)
10121 store->flags |= MONO_INST_FAULT;
10123 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10124 /* insert call to write barrier */
10128 dreg = alloc_ireg_mp (cfg);
10129 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10130 emit_write_barrier (cfg, ptr, sp [1]);
10133 store->flags |= ins_flag;
10140 #ifndef DISABLE_REMOTING
10141 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10142 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10143 MonoInst *iargs [4];
10145 GSHAREDVT_FAILURE (op);
10147 iargs [0] = sp [0];
10148 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10149 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10150 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10151 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10152 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10153 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10154 CHECK_CFG_EXCEPTION;
10156 g_assert (costs > 0);
10158 cfg->real_offset += 5;
10162 inline_costs += costs;
10164 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10170 if (sp [0]->type == STACK_VTYPE) {
10173 /* Have to compute the address of the variable */
10175 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10177 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10179 g_assert (var->klass == klass);
10181 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10185 if (op == CEE_LDFLDA) {
10186 if (is_magic_tls_access (field)) {
10187 GSHAREDVT_FAILURE (*ip);
10189 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10191 if (sp [0]->type == STACK_OBJ) {
10192 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10193 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10196 dreg = alloc_ireg_mp (cfg);
10198 if (mini_is_gsharedvt_klass (cfg, klass)) {
10199 MonoInst *offset_ins;
10201 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10202 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10204 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10206 ins->klass = mono_class_from_mono_type (field->type);
10207 ins->type = STACK_MP;
10213 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10215 if (mini_is_gsharedvt_klass (cfg, klass)) {
10216 MonoInst *offset_ins;
10218 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10219 dreg = alloc_ireg_mp (cfg);
10220 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10221 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10223 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10225 load->flags |= ins_flag;
10226 if (sp [0]->opcode != OP_LDADDR)
10227 load->flags |= MONO_INST_FAULT;
10241 * We can only support shared generic static
10242 * field access on architectures where the
10243 * trampoline code has been extended to handle
10244 * the generic class init.
10246 #ifndef MONO_ARCH_VTABLE_REG
10247 GENERIC_SHARING_FAILURE (op);
10250 context_used = mini_class_check_context_used (cfg, klass);
10252 ftype = mono_field_get_type (field);
10254 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10257 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10258 * to be called here.
10260 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10261 mono_class_vtable (cfg->domain, klass);
10262 CHECK_TYPELOAD (klass);
10264 mono_domain_lock (cfg->domain);
10265 if (cfg->domain->special_static_fields)
10266 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10267 mono_domain_unlock (cfg->domain);
10269 is_special_static = mono_class_field_is_special_static (field);
10271 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10272 thread_ins = mono_get_thread_intrinsic (cfg);
10276 /* Generate IR to compute the field address */
10277 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10279 * Fast access to TLS data
10280 * Inline version of get_thread_static_data () in
10284 int idx, static_data_reg, array_reg, dreg;
10286 GSHAREDVT_FAILURE (op);
10288 // offset &= 0x7fffffff;
10289 // idx = (offset >> 24) - 1;
10290 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10291 MONO_ADD_INS (cfg->cbb, thread_ins);
10292 static_data_reg = alloc_ireg (cfg);
10293 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10295 if (cfg->compile_aot) {
10296 int offset_reg, offset2_reg, idx_reg;
10298 /* For TLS variables, this will return the TLS offset */
10299 EMIT_NEW_SFLDACONST (cfg, ins, field);
10300 offset_reg = ins->dreg;
10301 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10302 idx_reg = alloc_ireg (cfg);
10303 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10304 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10305 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10306 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10307 array_reg = alloc_ireg (cfg);
10308 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10309 offset2_reg = alloc_ireg (cfg);
10310 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10311 dreg = alloc_ireg (cfg);
10312 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10314 offset = (gsize)addr & 0x7fffffff;
10315 idx = (offset >> 24) - 1;
10317 array_reg = alloc_ireg (cfg);
10318 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10319 dreg = alloc_ireg (cfg);
10320 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10322 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10323 (cfg->compile_aot && is_special_static) ||
10324 (context_used && is_special_static)) {
10325 MonoInst *iargs [2];
10327 g_assert (field->parent);
10328 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10329 if (context_used) {
10330 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10331 field, MONO_RGCTX_INFO_CLASS_FIELD);
10333 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10335 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10336 } else if (context_used) {
10337 MonoInst *static_data;
10340 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10341 method->klass->name_space, method->klass->name, method->name,
10342 depth, field->offset);
10345 if (mono_class_needs_cctor_run (klass, method))
10346 emit_generic_class_init (cfg, klass);
10349 * The pointer we're computing here is
10351 * super_info.static_data + field->offset
10353 static_data = emit_get_rgctx_klass (cfg, context_used,
10354 klass, MONO_RGCTX_INFO_STATIC_DATA);
10356 if (mini_is_gsharedvt_klass (cfg, klass)) {
10357 MonoInst *offset_ins;
10359 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10360 dreg = alloc_ireg_mp (cfg);
10361 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10362 } else if (field->offset == 0) {
10365 int addr_reg = mono_alloc_preg (cfg);
10366 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10368 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10369 MonoInst *iargs [2];
10371 g_assert (field->parent);
10372 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10373 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10374 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10376 MonoVTable *vtable = NULL;
10378 if (!cfg->compile_aot)
10379 vtable = mono_class_vtable (cfg->domain, klass);
10380 CHECK_TYPELOAD (klass);
10383 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10384 if (!(g_slist_find (class_inits, klass))) {
10385 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10386 if (cfg->verbose_level > 2)
10387 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10388 class_inits = g_slist_prepend (class_inits, klass);
10391 if (cfg->run_cctors) {
10393 /* This makes it so that inlining cannot trigger */
10394 /* .cctors: too many apps depend on them */
10395 /* running with a specific order... */
10397 if (! vtable->initialized)
10398 INLINE_FAILURE ("class init");
10399 ex = mono_runtime_class_init_full (vtable, FALSE);
10401 set_exception_object (cfg, ex);
10402 goto exception_exit;
10406 if (cfg->compile_aot)
10407 EMIT_NEW_SFLDACONST (cfg, ins, field);
10410 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10412 EMIT_NEW_PCONST (cfg, ins, addr);
10415 MonoInst *iargs [1];
10416 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10417 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10421 /* Generate IR to do the actual load/store operation */
10423 if (op == CEE_LDSFLDA) {
10424 ins->klass = mono_class_from_mono_type (ftype);
10425 ins->type = STACK_PTR;
10427 } else if (op == CEE_STSFLD) {
10430 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10431 store->flags |= ins_flag;
10433 gboolean is_const = FALSE;
10434 MonoVTable *vtable = NULL;
10435 gpointer addr = NULL;
10437 if (!context_used) {
10438 vtable = mono_class_vtable (cfg->domain, klass);
10439 CHECK_TYPELOAD (klass);
10441 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10442 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10443 int ro_type = ftype->type;
10445 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10446 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10447 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10450 GSHAREDVT_FAILURE (op);
10452 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10455 case MONO_TYPE_BOOLEAN:
10457 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10461 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10464 case MONO_TYPE_CHAR:
10466 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10470 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10475 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10479 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10484 case MONO_TYPE_PTR:
10485 case MONO_TYPE_FNPTR:
10486 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10487 type_to_eval_stack_type ((cfg), field->type, *sp);
10490 case MONO_TYPE_STRING:
10491 case MONO_TYPE_OBJECT:
10492 case MONO_TYPE_CLASS:
10493 case MONO_TYPE_SZARRAY:
10494 case MONO_TYPE_ARRAY:
10495 if (!mono_gc_is_moving ()) {
10496 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10497 type_to_eval_stack_type ((cfg), field->type, *sp);
10505 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10510 case MONO_TYPE_VALUETYPE:
10520 CHECK_STACK_OVF (1);
10522 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10523 load->flags |= ins_flag;
10536 token = read32 (ip + 1);
10537 klass = mini_get_class (method, token, generic_context);
10538 CHECK_TYPELOAD (klass);
10539 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10540 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10541 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10542 generic_class_is_reference_type (cfg, klass)) {
10543 /* insert call to write barrier */
10544 emit_write_barrier (cfg, sp [0], sp [1]);
10556 const char *data_ptr;
10558 guint32 field_token;
10564 token = read32 (ip + 1);
10566 klass = mini_get_class (method, token, generic_context);
10567 CHECK_TYPELOAD (klass);
10569 context_used = mini_class_check_context_used (cfg, klass);
10571 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10572 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10573 ins->sreg1 = sp [0]->dreg;
10574 ins->type = STACK_I4;
10575 ins->dreg = alloc_ireg (cfg);
10576 MONO_ADD_INS (cfg->cbb, ins);
10577 *sp = mono_decompose_opcode (cfg, ins);
10580 if (context_used) {
10581 MonoInst *args [3];
10582 MonoClass *array_class = mono_array_class_get (klass, 1);
10583 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10585 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10588 args [0] = emit_get_rgctx_klass (cfg, context_used,
10589 array_class, MONO_RGCTX_INFO_VTABLE);
10594 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10596 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10598 if (cfg->opt & MONO_OPT_SHARED) {
10599 /* Decompose now to avoid problems with references to the domainvar */
10600 MonoInst *iargs [3];
10602 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10603 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10604 iargs [2] = sp [0];
10606 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10608 /* Decompose later since it is needed by abcrem */
10609 MonoClass *array_type = mono_array_class_get (klass, 1);
10610 mono_class_vtable (cfg->domain, array_type);
10611 CHECK_TYPELOAD (array_type);
10613 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10614 ins->dreg = alloc_ireg_ref (cfg);
10615 ins->sreg1 = sp [0]->dreg;
10616 ins->inst_newa_class = klass;
10617 ins->type = STACK_OBJ;
10618 ins->klass = array_type;
10619 MONO_ADD_INS (cfg->cbb, ins);
10620 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10621 cfg->cbb->has_array_access = TRUE;
10623 /* Needed so mono_emit_load_get_addr () gets called */
10624 mono_get_got_var (cfg);
10634 * we inline/optimize the initialization sequence if possible.
10635 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10636 * for small sizes open code the memcpy
10637 * ensure the rva field is big enough
10639 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10640 MonoMethod *memcpy_method = get_memcpy_method ();
10641 MonoInst *iargs [3];
10642 int add_reg = alloc_ireg_mp (cfg);
10644 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10645 if (cfg->compile_aot) {
10646 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10648 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10650 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10651 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10660 if (sp [0]->type != STACK_OBJ)
10663 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10664 ins->dreg = alloc_preg (cfg);
10665 ins->sreg1 = sp [0]->dreg;
10666 ins->type = STACK_I4;
10667 /* This flag will be inherited by the decomposition */
10668 ins->flags |= MONO_INST_FAULT;
10669 MONO_ADD_INS (cfg->cbb, ins);
10670 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10671 cfg->cbb->has_array_access = TRUE;
10679 if (sp [0]->type != STACK_OBJ)
10682 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10684 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10685 CHECK_TYPELOAD (klass);
10686 /* we need to make sure that this array is exactly the type it needs
10687 * to be for correctness. the wrappers are lax with their usage
10688 * so we need to ignore them here
10690 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10691 MonoClass *array_class = mono_array_class_get (klass, 1);
10692 mini_emit_check_array_type (cfg, sp [0], array_class);
10693 CHECK_TYPELOAD (array_class);
10697 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10702 case CEE_LDELEM_I1:
10703 case CEE_LDELEM_U1:
10704 case CEE_LDELEM_I2:
10705 case CEE_LDELEM_U2:
10706 case CEE_LDELEM_I4:
10707 case CEE_LDELEM_U4:
10708 case CEE_LDELEM_I8:
10710 case CEE_LDELEM_R4:
10711 case CEE_LDELEM_R8:
10712 case CEE_LDELEM_REF: {
10718 if (*ip == CEE_LDELEM) {
10720 token = read32 (ip + 1);
10721 klass = mini_get_class (method, token, generic_context);
10722 CHECK_TYPELOAD (klass);
10723 mono_class_init (klass);
10726 klass = array_access_to_klass (*ip);
10728 if (sp [0]->type != STACK_OBJ)
10731 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10733 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10734 // FIXME-VT: OP_ICONST optimization
10735 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10736 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10737 ins->opcode = OP_LOADV_MEMBASE;
10738 } else if (sp [1]->opcode == OP_ICONST) {
10739 int array_reg = sp [0]->dreg;
10740 int index_reg = sp [1]->dreg;
10741 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10743 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10744 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10746 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10747 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10750 if (*ip == CEE_LDELEM)
10757 case CEE_STELEM_I1:
10758 case CEE_STELEM_I2:
10759 case CEE_STELEM_I4:
10760 case CEE_STELEM_I8:
10761 case CEE_STELEM_R4:
10762 case CEE_STELEM_R8:
10763 case CEE_STELEM_REF:
10768 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10770 if (*ip == CEE_STELEM) {
10772 token = read32 (ip + 1);
10773 klass = mini_get_class (method, token, generic_context);
10774 CHECK_TYPELOAD (klass);
10775 mono_class_init (klass);
10778 klass = array_access_to_klass (*ip);
10780 if (sp [0]->type != STACK_OBJ)
10783 emit_array_store (cfg, klass, sp, TRUE);
10785 if (*ip == CEE_STELEM)
10792 case CEE_CKFINITE: {
10796 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10797 ins->sreg1 = sp [0]->dreg;
10798 ins->dreg = alloc_freg (cfg);
10799 ins->type = STACK_R8;
10800 MONO_ADD_INS (bblock, ins);
10802 *sp++ = mono_decompose_opcode (cfg, ins);
10807 case CEE_REFANYVAL: {
10808 MonoInst *src_var, *src;
10810 int klass_reg = alloc_preg (cfg);
10811 int dreg = alloc_preg (cfg);
10813 GSHAREDVT_FAILURE (*ip);
10816 MONO_INST_NEW (cfg, ins, *ip);
10819 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10820 CHECK_TYPELOAD (klass);
10821 mono_class_init (klass);
10823 context_used = mini_class_check_context_used (cfg, klass);
10826 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10828 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10829 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10830 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10832 if (context_used) {
10833 MonoInst *klass_ins;
10835 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10836 klass, MONO_RGCTX_INFO_KLASS);
10839 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10840 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10842 mini_emit_class_check (cfg, klass_reg, klass);
10844 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10845 ins->type = STACK_MP;
10850 case CEE_MKREFANY: {
10851 MonoInst *loc, *addr;
10853 GSHAREDVT_FAILURE (*ip);
10856 MONO_INST_NEW (cfg, ins, *ip);
10859 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10860 CHECK_TYPELOAD (klass);
10861 mono_class_init (klass);
10863 context_used = mini_class_check_context_used (cfg, klass);
10865 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10866 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10868 if (context_used) {
10869 MonoInst *const_ins;
10870 int type_reg = alloc_preg (cfg);
10872 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10873 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10874 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10876 } else if (cfg->compile_aot) {
10877 int const_reg = alloc_preg (cfg);
10878 int type_reg = alloc_preg (cfg);
10880 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10881 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10882 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10885 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10886 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10890 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10891 ins->type = STACK_VTYPE;
10892 ins->klass = mono_defaults.typed_reference_class;
10897 case CEE_LDTOKEN: {
10899 MonoClass *handle_class;
10901 CHECK_STACK_OVF (1);
10904 n = read32 (ip + 1);
10906 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10907 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10908 handle = mono_method_get_wrapper_data (method, n);
10909 handle_class = mono_method_get_wrapper_data (method, n + 1);
10910 if (handle_class == mono_defaults.typehandle_class)
10911 handle = &((MonoClass*)handle)->byval_arg;
10914 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10918 mono_class_init (handle_class);
10919 if (cfg->generic_sharing_context) {
10920 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10921 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10922 /* This case handles ldtoken
10923 of an open type, like for
10926 } else if (handle_class == mono_defaults.typehandle_class) {
10927 /* If we get a MONO_TYPE_CLASS
10928 then we need to provide the
10930 instantiation of it. */
10931 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10934 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10935 } else if (handle_class == mono_defaults.fieldhandle_class)
10936 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10937 else if (handle_class == mono_defaults.methodhandle_class)
10938 context_used = mini_method_check_context_used (cfg, handle);
10940 g_assert_not_reached ();
10943 if ((cfg->opt & MONO_OPT_SHARED) &&
10944 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10945 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10946 MonoInst *addr, *vtvar, *iargs [3];
10947 int method_context_used;
10949 method_context_used = mini_method_check_context_used (cfg, method);
10951 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10953 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10954 EMIT_NEW_ICONST (cfg, iargs [1], n);
10955 if (method_context_used) {
10956 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10957 method, MONO_RGCTX_INFO_METHOD);
10958 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10960 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10961 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10963 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10965 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10967 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10969 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10970 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10971 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10972 (cmethod->klass == mono_defaults.systemtype_class) &&
10973 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10974 MonoClass *tclass = mono_class_from_mono_type (handle);
10976 mono_class_init (tclass);
10977 if (context_used) {
10978 ins = emit_get_rgctx_klass (cfg, context_used,
10979 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10980 } else if (cfg->compile_aot) {
10981 if (method->wrapper_type) {
10982 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10983 /* Special case for static synchronized wrappers */
10984 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10986 /* FIXME: n is not a normal token */
10988 EMIT_NEW_PCONST (cfg, ins, NULL);
10991 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10994 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10996 ins->type = STACK_OBJ;
10997 ins->klass = cmethod->klass;
11000 MonoInst *addr, *vtvar;
11002 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11004 if (context_used) {
11005 if (handle_class == mono_defaults.typehandle_class) {
11006 ins = emit_get_rgctx_klass (cfg, context_used,
11007 mono_class_from_mono_type (handle),
11008 MONO_RGCTX_INFO_TYPE);
11009 } else if (handle_class == mono_defaults.methodhandle_class) {
11010 ins = emit_get_rgctx_method (cfg, context_used,
11011 handle, MONO_RGCTX_INFO_METHOD);
11012 } else if (handle_class == mono_defaults.fieldhandle_class) {
11013 ins = emit_get_rgctx_field (cfg, context_used,
11014 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11016 g_assert_not_reached ();
11018 } else if (cfg->compile_aot) {
11019 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11021 EMIT_NEW_PCONST (cfg, ins, handle);
11023 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11024 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11025 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11035 MONO_INST_NEW (cfg, ins, OP_THROW);
11037 ins->sreg1 = sp [0]->dreg;
11039 bblock->out_of_line = TRUE;
11040 MONO_ADD_INS (bblock, ins);
11041 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11042 MONO_ADD_INS (bblock, ins);
11045 link_bblock (cfg, bblock, end_bblock);
11046 start_new_bblock = 1;
11048 case CEE_ENDFINALLY:
11049 /* mono_save_seq_point_info () depends on this */
11050 if (sp != stack_start)
11051 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11052 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11053 MONO_ADD_INS (bblock, ins);
11055 start_new_bblock = 1;
11058 * Control will leave the method so empty the stack, otherwise
11059 * the next basic block will start with a nonempty stack.
11061 while (sp != stack_start) {
11066 case CEE_LEAVE_S: {
11069 if (*ip == CEE_LEAVE) {
11071 target = ip + 5 + (gint32)read32(ip + 1);
11074 target = ip + 2 + (signed char)(ip [1]);
11077 /* empty the stack */
11078 while (sp != stack_start) {
11083 * If this leave statement is in a catch block, check for a
11084 * pending exception, and rethrow it if necessary.
11085 * We avoid doing this in runtime invoke wrappers, since those are called
11086 * by native code which expects the wrapper to catch all exceptions.
11088 for (i = 0; i < header->num_clauses; ++i) {
11089 MonoExceptionClause *clause = &header->clauses [i];
11092 * Use <= in the final comparison to handle clauses with multiple
11093 * leave statements, like in bug #78024.
11094 * The ordering of the exception clauses guarantees that we find the
11095 * innermost clause.
11097 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11099 MonoBasicBlock *dont_throw;
11104 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11107 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11109 NEW_BBLOCK (cfg, dont_throw);
11112 * Currently, we always rethrow the abort exception, despite the
11113 * fact that this is not correct. See thread6.cs for an example.
11114 * But propagating the abort exception is more important than
11115 * getting the semantics right.
11117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11119 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11121 MONO_START_BB (cfg, dont_throw);
11126 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11128 MonoExceptionClause *clause;
11130 for (tmp = handlers; tmp; tmp = tmp->next) {
11131 clause = tmp->data;
11132 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11134 link_bblock (cfg, bblock, tblock);
11135 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11136 ins->inst_target_bb = tblock;
11137 ins->inst_eh_block = clause;
11138 MONO_ADD_INS (bblock, ins);
11139 bblock->has_call_handler = 1;
11140 if (COMPILE_LLVM (cfg)) {
11141 MonoBasicBlock *target_bb;
11144 * Link the finally bblock with the target, since it will
11145 * conceptually branch there.
11146 * FIXME: Have to link the bblock containing the endfinally.
11148 GET_BBLOCK (cfg, target_bb, target);
11149 link_bblock (cfg, tblock, target_bb);
11152 g_list_free (handlers);
11155 MONO_INST_NEW (cfg, ins, OP_BR);
11156 MONO_ADD_INS (bblock, ins);
11157 GET_BBLOCK (cfg, tblock, target);
11158 link_bblock (cfg, bblock, tblock);
11159 ins->inst_target_bb = tblock;
11160 start_new_bblock = 1;
11162 if (*ip == CEE_LEAVE)
11171 * Mono specific opcodes
11173 case MONO_CUSTOM_PREFIX: {
11175 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11179 case CEE_MONO_ICALL: {
11181 MonoJitICallInfo *info;
11183 token = read32 (ip + 2);
11184 func = mono_method_get_wrapper_data (method, token);
11185 info = mono_find_jit_icall_by_addr (func);
11187 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11190 CHECK_STACK (info->sig->param_count);
11191 sp -= info->sig->param_count;
11193 ins = mono_emit_jit_icall (cfg, info->func, sp);
11194 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11198 inline_costs += 10 * num_calls++;
11202 case CEE_MONO_LDPTR: {
11205 CHECK_STACK_OVF (1);
11207 token = read32 (ip + 2);
11209 ptr = mono_method_get_wrapper_data (method, token);
11210 /* FIXME: Generalize this */
11211 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11212 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11217 EMIT_NEW_PCONST (cfg, ins, ptr);
11220 inline_costs += 10 * num_calls++;
11221 /* Can't embed random pointers into AOT code */
11225 case CEE_MONO_JIT_ICALL_ADDR: {
11226 MonoJitICallInfo *callinfo;
11229 CHECK_STACK_OVF (1);
11231 token = read32 (ip + 2);
11233 ptr = mono_method_get_wrapper_data (method, token);
11234 callinfo = mono_find_jit_icall_by_addr (ptr);
11235 g_assert (callinfo);
11236 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11239 inline_costs += 10 * num_calls++;
11242 case CEE_MONO_ICALL_ADDR: {
11243 MonoMethod *cmethod;
11246 CHECK_STACK_OVF (1);
11248 token = read32 (ip + 2);
11250 cmethod = mono_method_get_wrapper_data (method, token);
11252 if (cfg->compile_aot) {
11253 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11255 ptr = mono_lookup_internal_call (cmethod);
11257 EMIT_NEW_PCONST (cfg, ins, ptr);
11263 case CEE_MONO_VTADDR: {
11264 MonoInst *src_var, *src;
11270 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11271 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11276 case CEE_MONO_NEWOBJ: {
11277 MonoInst *iargs [2];
11279 CHECK_STACK_OVF (1);
11281 token = read32 (ip + 2);
11282 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11283 mono_class_init (klass);
11284 NEW_DOMAINCONST (cfg, iargs [0]);
11285 MONO_ADD_INS (cfg->cbb, iargs [0]);
11286 NEW_CLASSCONST (cfg, iargs [1], klass);
11287 MONO_ADD_INS (cfg->cbb, iargs [1]);
11288 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11290 inline_costs += 10 * num_calls++;
11293 case CEE_MONO_OBJADDR:
11296 MONO_INST_NEW (cfg, ins, OP_MOVE);
11297 ins->dreg = alloc_ireg_mp (cfg);
11298 ins->sreg1 = sp [0]->dreg;
11299 ins->type = STACK_MP;
11300 MONO_ADD_INS (cfg->cbb, ins);
11304 case CEE_MONO_LDNATIVEOBJ:
11306 * Similar to LDOBJ, but instead load the unmanaged
11307 * representation of the vtype to the stack.
11312 token = read32 (ip + 2);
11313 klass = mono_method_get_wrapper_data (method, token);
11314 g_assert (klass->valuetype);
11315 mono_class_init (klass);
11318 MonoInst *src, *dest, *temp;
11321 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11322 temp->backend.is_pinvoke = 1;
11323 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11324 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11326 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11327 dest->type = STACK_VTYPE;
11328 dest->klass = klass;
11334 case CEE_MONO_RETOBJ: {
11336 * Same as RET, but return the native representation of a vtype
11339 g_assert (cfg->ret);
11340 g_assert (mono_method_signature (method)->pinvoke);
11345 token = read32 (ip + 2);
11346 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11348 if (!cfg->vret_addr) {
11349 g_assert (cfg->ret_var_is_local);
11351 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11353 EMIT_NEW_RETLOADA (cfg, ins);
11355 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11357 if (sp != stack_start)
11360 MONO_INST_NEW (cfg, ins, OP_BR);
11361 ins->inst_target_bb = end_bblock;
11362 MONO_ADD_INS (bblock, ins);
11363 link_bblock (cfg, bblock, end_bblock);
11364 start_new_bblock = 1;
11368 case CEE_MONO_CISINST:
11369 case CEE_MONO_CCASTCLASS: {
11374 token = read32 (ip + 2);
11375 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11376 if (ip [1] == CEE_MONO_CISINST)
11377 ins = handle_cisinst (cfg, klass, sp [0]);
11379 ins = handle_ccastclass (cfg, klass, sp [0]);
11385 case CEE_MONO_SAVE_LMF:
11386 case CEE_MONO_RESTORE_LMF:
11387 #ifdef MONO_ARCH_HAVE_LMF_OPS
11388 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11389 MONO_ADD_INS (bblock, ins);
11390 cfg->need_lmf_area = TRUE;
11394 case CEE_MONO_CLASSCONST:
11395 CHECK_STACK_OVF (1);
11397 token = read32 (ip + 2);
11398 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11401 inline_costs += 10 * num_calls++;
11403 case CEE_MONO_NOT_TAKEN:
11404 bblock->out_of_line = TRUE;
11407 case CEE_MONO_TLS: {
11410 CHECK_STACK_OVF (1);
11412 key = (gint32)read32 (ip + 2);
11413 g_assert (key < TLS_KEY_NUM);
11415 ins = mono_create_tls_get (cfg, key);
11417 if (cfg->compile_aot) {
11419 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11420 ins->dreg = alloc_preg (cfg);
11421 ins->type = STACK_PTR;
11423 g_assert_not_reached ();
11426 ins->type = STACK_PTR;
11427 MONO_ADD_INS (bblock, ins);
11432 case CEE_MONO_DYN_CALL: {
11433 MonoCallInst *call;
11435 /* It would be easier to call a trampoline, but that would put an
11436 * extra frame on the stack, confusing exception handling. So
11437 * implement it inline using an opcode for now.
11440 if (!cfg->dyn_call_var) {
11441 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11442 /* prevent it from being register allocated */
11443 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11446 /* Has to use a call inst since it local regalloc expects it */
11447 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11448 ins = (MonoInst*)call;
11450 ins->sreg1 = sp [0]->dreg;
11451 ins->sreg2 = sp [1]->dreg;
11452 MONO_ADD_INS (bblock, ins);
11454 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11457 inline_costs += 10 * num_calls++;
11461 case CEE_MONO_MEMORY_BARRIER: {
11463 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11467 case CEE_MONO_JIT_ATTACH: {
11468 MonoInst *args [16];
11469 MonoInst *ad_ins, *lmf_ins;
11470 MonoBasicBlock *next_bb = NULL;
11472 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11474 EMIT_NEW_PCONST (cfg, ins, NULL);
11475 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11481 ad_ins = mono_get_domain_intrinsic (cfg);
11482 lmf_ins = mono_get_lmf_intrinsic (cfg);
11485 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11486 NEW_BBLOCK (cfg, next_bb);
11488 MONO_ADD_INS (cfg->cbb, ad_ins);
11489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11492 MONO_ADD_INS (cfg->cbb, lmf_ins);
11493 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11494 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11497 if (cfg->compile_aot) {
11498 /* AOT code is only used in the root domain */
11499 EMIT_NEW_PCONST (cfg, args [0], NULL);
11501 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11503 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11504 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11507 MONO_START_BB (cfg, next_bb);
11513 case CEE_MONO_JIT_DETACH: {
11514 MonoInst *args [16];
11516 /* Restore the original domain */
11517 dreg = alloc_ireg (cfg);
11518 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11519 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11524 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11530 case CEE_PREFIX1: {
11533 case CEE_ARGLIST: {
11534 /* somewhat similar to LDTOKEN */
11535 MonoInst *addr, *vtvar;
11536 CHECK_STACK_OVF (1);
11537 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11539 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11540 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11542 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11543 ins->type = STACK_VTYPE;
11544 ins->klass = mono_defaults.argumenthandle_class;
11557 * The following transforms:
11558 * CEE_CEQ into OP_CEQ
11559 * CEE_CGT into OP_CGT
11560 * CEE_CGT_UN into OP_CGT_UN
11561 * CEE_CLT into OP_CLT
11562 * CEE_CLT_UN into OP_CLT_UN
11564 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11566 MONO_INST_NEW (cfg, ins, cmp->opcode);
11568 cmp->sreg1 = sp [0]->dreg;
11569 cmp->sreg2 = sp [1]->dreg;
11570 type_from_op (cmp, sp [0], sp [1]);
11572 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11573 cmp->opcode = OP_LCOMPARE;
11574 else if (sp [0]->type == STACK_R8)
11575 cmp->opcode = OP_FCOMPARE;
11577 cmp->opcode = OP_ICOMPARE;
11578 MONO_ADD_INS (bblock, cmp);
11579 ins->type = STACK_I4;
11580 ins->dreg = alloc_dreg (cfg, ins->type);
11581 type_from_op (ins, sp [0], sp [1]);
11583 if (cmp->opcode == OP_FCOMPARE) {
11585 * The backends expect the fceq opcodes to do the
11588 cmp->opcode = OP_NOP;
11589 ins->sreg1 = cmp->sreg1;
11590 ins->sreg2 = cmp->sreg2;
11592 MONO_ADD_INS (bblock, ins);
11598 MonoInst *argconst;
11599 MonoMethod *cil_method;
11601 CHECK_STACK_OVF (1);
11603 n = read32 (ip + 2);
11604 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11605 if (!cmethod || mono_loader_get_last_error ())
11607 mono_class_init (cmethod->klass);
11609 mono_save_token_info (cfg, image, n, cmethod);
11611 context_used = mini_method_check_context_used (cfg, cmethod);
11613 cil_method = cmethod;
11614 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11615 METHOD_ACCESS_FAILURE;
11617 if (mono_security_cas_enabled ()) {
11618 if (check_linkdemand (cfg, method, cmethod))
11619 INLINE_FAILURE ("linkdemand");
11620 CHECK_CFG_EXCEPTION;
11621 } else if (mono_security_core_clr_enabled ()) {
11622 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11626 * Optimize the common case of ldftn+delegate creation
11628 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11629 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11630 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11631 MonoInst *target_ins;
11632 MonoMethod *invoke;
11633 int invoke_context_used;
11635 invoke = mono_get_delegate_invoke (ctor_method->klass);
11636 if (!invoke || !mono_method_signature (invoke))
11639 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11641 target_ins = sp [-1];
11643 if (mono_security_core_clr_enabled ())
11644 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11646 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11647 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11648 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11649 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11650 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11654 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11655 /* FIXME: SGEN support */
11656 if (invoke_context_used == 0) {
11658 if (cfg->verbose_level > 3)
11659 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11661 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11662 CHECK_CFG_EXCEPTION;
11671 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11672 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11676 inline_costs += 10 * num_calls++;
11679 case CEE_LDVIRTFTN: {
11680 MonoInst *args [2];
11684 n = read32 (ip + 2);
11685 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11686 if (!cmethod || mono_loader_get_last_error ())
11688 mono_class_init (cmethod->klass);
11690 context_used = mini_method_check_context_used (cfg, cmethod);
11692 if (mono_security_cas_enabled ()) {
11693 if (check_linkdemand (cfg, method, cmethod))
11694 INLINE_FAILURE ("linkdemand");
11695 CHECK_CFG_EXCEPTION;
11696 } else if (mono_security_core_clr_enabled ()) {
11697 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11703 args [1] = emit_get_rgctx_method (cfg, context_used,
11704 cmethod, MONO_RGCTX_INFO_METHOD);
11707 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11709 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11712 inline_costs += 10 * num_calls++;
11716 CHECK_STACK_OVF (1);
11718 n = read16 (ip + 2);
11720 EMIT_NEW_ARGLOAD (cfg, ins, n);
11725 CHECK_STACK_OVF (1);
11727 n = read16 (ip + 2);
11729 NEW_ARGLOADA (cfg, ins, n);
11730 MONO_ADD_INS (cfg->cbb, ins);
11738 n = read16 (ip + 2);
11740 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11742 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11746 CHECK_STACK_OVF (1);
11748 n = read16 (ip + 2);
11750 EMIT_NEW_LOCLOAD (cfg, ins, n);
11755 unsigned char *tmp_ip;
11756 CHECK_STACK_OVF (1);
11758 n = read16 (ip + 2);
11761 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11767 EMIT_NEW_LOCLOADA (cfg, ins, n);
11776 n = read16 (ip + 2);
11778 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11780 emit_stloc_ir (cfg, sp, header, n);
11787 if (sp != stack_start)
11789 if (cfg->method != method)
11791 * Inlining this into a loop in a parent could lead to
11792 * stack overflows which is different behavior than the
11793 * non-inlined case, thus disable inlining in this case.
11795 goto inline_failure;
11797 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11798 ins->dreg = alloc_preg (cfg);
11799 ins->sreg1 = sp [0]->dreg;
11800 ins->type = STACK_PTR;
11801 MONO_ADD_INS (cfg->cbb, ins);
11803 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11805 ins->flags |= MONO_INST_INIT;
11810 case CEE_ENDFILTER: {
11811 MonoExceptionClause *clause, *nearest;
11812 int cc, nearest_num;
11816 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11818 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11819 ins->sreg1 = (*sp)->dreg;
11820 MONO_ADD_INS (bblock, ins);
11821 start_new_bblock = 1;
11826 for (cc = 0; cc < header->num_clauses; ++cc) {
11827 clause = &header->clauses [cc];
11828 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11829 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11830 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11835 g_assert (nearest);
11836 if ((ip - header->code) != nearest->handler_offset)
11841 case CEE_UNALIGNED_:
11842 ins_flag |= MONO_INST_UNALIGNED;
11843 /* FIXME: record alignment? we can assume 1 for now */
11847 case CEE_VOLATILE_:
11848 ins_flag |= MONO_INST_VOLATILE;
11852 ins_flag |= MONO_INST_TAILCALL;
11853 cfg->flags |= MONO_CFG_HAS_TAIL;
11854 /* Can't inline tail calls at this time */
11855 inline_costs += 100000;
11862 token = read32 (ip + 2);
11863 klass = mini_get_class (method, token, generic_context);
11864 CHECK_TYPELOAD (klass);
11865 if (generic_class_is_reference_type (cfg, klass))
11866 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11868 mini_emit_initobj (cfg, *sp, NULL, klass);
11872 case CEE_CONSTRAINED_:
11874 token = read32 (ip + 2);
11875 constrained_call = mini_get_class (method, token, generic_context);
11876 CHECK_TYPELOAD (constrained_call);
11880 case CEE_INITBLK: {
11881 MonoInst *iargs [3];
11885 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11886 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11887 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11888 /* emit_memset only works when val == 0 */
11889 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11891 iargs [0] = sp [0];
11892 iargs [1] = sp [1];
11893 iargs [2] = sp [2];
11894 if (ip [1] == CEE_CPBLK) {
11895 MonoMethod *memcpy_method = get_memcpy_method ();
11896 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11898 MonoMethod *memset_method = get_memset_method ();
11899 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11909 ins_flag |= MONO_INST_NOTYPECHECK;
11911 ins_flag |= MONO_INST_NORANGECHECK;
11912 /* we ignore the no-nullcheck for now since we
11913 * really do it explicitly only when doing callvirt->call
11917 case CEE_RETHROW: {
11919 int handler_offset = -1;
11921 for (i = 0; i < header->num_clauses; ++i) {
11922 MonoExceptionClause *clause = &header->clauses [i];
11923 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11924 handler_offset = clause->handler_offset;
11929 bblock->flags |= BB_EXCEPTION_UNSAFE;
11931 g_assert (handler_offset != -1);
11933 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11934 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11935 ins->sreg1 = load->dreg;
11936 MONO_ADD_INS (bblock, ins);
11938 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11939 MONO_ADD_INS (bblock, ins);
11942 link_bblock (cfg, bblock, end_bblock);
11943 start_new_bblock = 1;
11951 GSHAREDVT_FAILURE (*ip);
11953 CHECK_STACK_OVF (1);
11955 token = read32 (ip + 2);
11956 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11957 MonoType *type = mono_type_create_from_typespec (image, token);
11958 val = mono_type_size (type, &ialign);
11960 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11961 CHECK_TYPELOAD (klass);
11962 mono_class_init (klass);
11963 val = mono_type_size (&klass->byval_arg, &ialign);
11965 EMIT_NEW_ICONST (cfg, ins, val);
11970 case CEE_REFANYTYPE: {
11971 MonoInst *src_var, *src;
11973 GSHAREDVT_FAILURE (*ip);
11979 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11981 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11982 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11983 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11988 case CEE_READONLY_:
12001 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12011 g_warning ("opcode 0x%02x not handled", *ip);
12015 if (start_new_bblock != 1)
12018 bblock->cil_length = ip - bblock->cil_code;
12019 if (bblock->next_bb) {
12020 /* This could already be set because of inlining, #693905 */
12021 MonoBasicBlock *bb = bblock;
12023 while (bb->next_bb)
12025 bb->next_bb = end_bblock;
12027 bblock->next_bb = end_bblock;
12030 if (cfg->method == method && cfg->domainvar) {
12032 MonoInst *get_domain;
12034 cfg->cbb = init_localsbb;
12036 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12037 MONO_ADD_INS (cfg->cbb, get_domain);
12039 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12041 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12042 MONO_ADD_INS (cfg->cbb, store);
12045 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12046 if (cfg->compile_aot)
12047 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12048 mono_get_got_var (cfg);
12051 if (cfg->method == method && cfg->got_var)
12052 mono_emit_load_got_addr (cfg);
12054 if (init_localsbb) {
12055 cfg->cbb = init_localsbb;
12057 for (i = 0; i < header->num_locals; ++i) {
12058 emit_init_local (cfg, i, header->locals [i], init_locals);
12062 if (cfg->init_ref_vars && cfg->method == method) {
12063 /* Emit initialization for ref vars */
12064 // FIXME: Avoid duplication initialization for IL locals.
12065 for (i = 0; i < cfg->num_varinfo; ++i) {
12066 MonoInst *ins = cfg->varinfo [i];
12068 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12069 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12073 if (cfg->lmf_var && cfg->method == method) {
12074 cfg->cbb = init_localsbb;
12075 emit_push_lmf (cfg);
12079 MonoBasicBlock *bb;
12082 * Make seq points at backward branch targets interruptable.
12084 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12085 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12086 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12089 /* Add a sequence point for method entry/exit events */
12091 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12092 MONO_ADD_INS (init_localsbb, ins);
12093 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12094 MONO_ADD_INS (cfg->bb_exit, ins);
12098 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12099 * the code they refer to was dead (#11880).
12101 if (sym_seq_points) {
12102 for (i = 0; i < header->code_size; ++i) {
12103 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12106 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12107 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12114 if (cfg->method == method) {
12115 MonoBasicBlock *bb;
12116 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12117 bb->region = mono_find_block_region (cfg, bb->real_offset);
12119 mono_create_spvar_for_region (cfg, bb->region);
12120 if (cfg->verbose_level > 2)
12121 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12125 g_slist_free (class_inits);
12126 dont_inline = g_list_remove (dont_inline, method);
12128 if (inline_costs < 0) {
12131 /* Method is too large */
12132 mname = mono_method_full_name (method, TRUE);
12133 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12134 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12136 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12137 mono_basic_block_free (original_bb);
12141 if ((cfg->verbose_level > 2) && (cfg->method == method))
12142 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12144 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12145 mono_basic_block_free (original_bb);
12146 return inline_costs;
12149 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12156 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12160 set_exception_type_from_invalid_il (cfg, method, ip);
12164 g_slist_free (class_inits);
12165 mono_basic_block_free (original_bb);
12166 dont_inline = g_list_remove (dont_inline, method);
12167 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart, so a
 * store whose source register is known to hold a constant can be folded into
 * an immediate store.  Only the store opcodes listed below are legal inputs;
 * anything else aborts.
 */
12172 store_membase_reg_to_store_membase_imm (int opcode)
/* Pointer-sized store. */
12175 case OP_STORE_MEMBASE_REG:
12176 return OP_STORE_MEMBASE_IMM;
/* Explicit 1/2/4/8 byte integer stores. */
12177 case OP_STOREI1_MEMBASE_REG:
12178 return OP_STOREI1_MEMBASE_IMM;
12179 case OP_STOREI2_MEMBASE_REG:
12180 return OP_STOREI2_MEMBASE_IMM;
12181 case OP_STOREI4_MEMBASE_REG:
12182 return OP_STOREI4_MEMBASE_IMM;
12183 case OP_STOREI8_MEMBASE_REG:
12184 return OP_STOREI8_MEMBASE_IMM;
/* Callers must only pass the *_MEMBASE_REG stores handled above. */
12186 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a register-register opcode to its register-immediate (_IMM) form,
 * used by the front end to fold constant operands directly into
 * instructions.  Covers int/long ALU ops, shifts, compares, membase stores,
 * some x86/amd64-specific opcodes and OP_LOCALLOC.
 */
12193 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops. */
12197 return OP_IADD_IMM;
12199 return OP_ISUB_IMM;
12201 return OP_IDIV_IMM;
12203 return OP_IDIV_UN_IMM;
12205 return OP_IREM_IMM;
12207 return OP_IREM_UN_IMM;
12209 return OP_IMUL_IMM;
12211 return OP_IAND_IMM;
12215 return OP_IXOR_IMM;
/* 32 bit shifts. */
12217 return OP_ISHL_IMM;
12219 return OP_ISHR_IMM;
12221 return OP_ISHR_UN_IMM;
/* 64 bit ALU ops and shifts. */
12224 return OP_LADD_IMM;
12226 return OP_LSUB_IMM;
12228 return OP_LAND_IMM;
12232 return OP_LXOR_IMM;
12234 return OP_LSHL_IMM;
12236 return OP_LSHR_IMM;
12238 return OP_LSHR_UN_IMM;
/* Compares. */
12241 return OP_COMPARE_IMM;
12243 return OP_ICOMPARE_IMM;
12245 return OP_LCOMPARE_IMM;
/* Stores with a constant source value. */
12247 case OP_STORE_MEMBASE_REG:
12248 return OP_STORE_MEMBASE_IMM;
12249 case OP_STOREI1_MEMBASE_REG:
12250 return OP_STOREI1_MEMBASE_IMM;
12251 case OP_STOREI2_MEMBASE_REG:
12252 return OP_STOREI2_MEMBASE_IMM;
12253 case OP_STOREI4_MEMBASE_REG:
12254 return OP_STOREI4_MEMBASE_IMM;
/* Target-specific opcodes, only meaningful on x86/amd64. */
12256 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12258 return OP_X86_PUSH_IMM;
12259 case OP_X86_COMPARE_MEMBASE_REG:
12260 return OP_X86_COMPARE_MEMBASE_IMM;
12262 #if defined(TARGET_AMD64)
12263 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12264 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant address becomes a direct call. */
12266 case OP_VOIDCALL_REG:
12267 return OP_VOIDCALL;
/* Alloca with a constant size. */
12275 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL opcode to the machine-level OP_LOAD*_MEMBASE
 * opcode of the matching size/signedness.  Unknown opcodes abort.
 */
12282 ldind_to_load_membase (int opcode)
/* 1/2/4 byte loads, signed then unsigned. */
12286 return OP_LOADI1_MEMBASE;
12288 return OP_LOADU1_MEMBASE;
12290 return OP_LOADI2_MEMBASE;
12292 return OP_LOADU2_MEMBASE;
12294 return OP_LOADI4_MEMBASE;
12296 return OP_LOADU4_MEMBASE;
/* Native int and object references both use the pointer-sized load. */
12298 return OP_LOAD_MEMBASE;
12299 case CEE_LDIND_REF:
12300 return OP_LOAD_MEMBASE;
12302 return OP_LOADI8_MEMBASE;
/* Floating point loads. */
12304 return OP_LOADR4_MEMBASE;
12306 return OP_LOADR8_MEMBASE;
/* Callers must only pass LDIND opcodes handled above. */
12308 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL opcode to the machine-level OP_STORE*_MEMBASE_REG
 * opcode of the matching size.  Unknown opcodes abort.
 */
12315 stind_to_store_membase (int opcode)
12319 return OP_STOREI1_MEMBASE_REG;
12321 return OP_STOREI2_MEMBASE_REG;
12323 return OP_STOREI4_MEMBASE_REG;
/* Native int and object references both use the pointer-sized store. */
12325 case CEE_STIND_REF:
12326 return OP_STORE_MEMBASE_REG;
12328 return OP_STOREI8_MEMBASE_REG;
/* Floating point stores. */
12330 return OP_STORER4_MEMBASE_REG;
12332 return OP_STORER8_MEMBASE_REG;
/* Callers must only pass STIND opcodes handled above. */
12334 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the corresponding OP_LOAD*_MEM form
 * (load from an absolute address), for targets which support such
 * addressing.  Only x86/amd64 provide the *_MEM forms here; on other
 * targets no conversion takes place.
 */
12341 mono_load_membase_to_load_mem (int opcode)
12343 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12344 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12346 case OP_LOAD_MEMBASE:
12347 return OP_LOAD_MEM;
12348 case OP_LOADU1_MEMBASE:
12349 return OP_LOADU1_MEM;
12350 case OP_LOADU2_MEMBASE:
12351 return OP_LOADU2_MEM;
12352 case OP_LOADI4_MEMBASE:
12353 return OP_LOADI4_MEM;
12354 case OP_LOADU4_MEMBASE:
12355 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist on 64 bit targets. */
12356 #if SIZEOF_REGISTER == 8
12357 case OP_LOADI8_MEMBASE:
12358 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored by STORE_OPCODE,
 * return the x86/amd64 read-modify-write memory form (op directly on the
 * membase destination), or fall through when no such fusion is possible.
 * The store_opcode guard restricts fusion to full-width stores.
 */
12367 op_to_op_dest_membase (int store_opcode, int opcode)
12369 #if defined(TARGET_X86)
/* On x86 only pointer-sized / 4-byte stores can be fused. */
12370 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register-source ALU ops fused into the memory destination. */
12375 return OP_X86_ADD_MEMBASE_REG;
12377 return OP_X86_SUB_MEMBASE_REG;
12379 return OP_X86_AND_MEMBASE_REG;
12381 return OP_X86_OR_MEMBASE_REG;
12383 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source ALU ops fused into the memory destination. */
12386 return OP_X86_ADD_MEMBASE_IMM;
12389 return OP_X86_SUB_MEMBASE_IMM;
12392 return OP_X86_AND_MEMBASE_IMM;
12395 return OP_X86_OR_MEMBASE_IMM;
12398 return OP_X86_XOR_MEMBASE_IMM;
12404 #if defined(TARGET_AMD64)
/* On amd64, 4-byte and 8-byte (and pointer-sized) stores can be fused. */
12405 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit forms reuse the OP_X86_* opcodes. */
12410 return OP_X86_ADD_MEMBASE_REG;
12412 return OP_X86_SUB_MEMBASE_REG;
12414 return OP_X86_AND_MEMBASE_REG;
12416 return OP_X86_OR_MEMBASE_REG;
12418 return OP_X86_XOR_MEMBASE_REG;
12420 return OP_X86_ADD_MEMBASE_IMM;
12422 return OP_X86_SUB_MEMBASE_IMM;
12424 return OP_X86_AND_MEMBASE_IMM;
12426 return OP_X86_OR_MEMBASE_IMM;
12428 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit forms use dedicated OP_AMD64_* opcodes. */
12430 return OP_AMD64_ADD_MEMBASE_REG;
12432 return OP_AMD64_SUB_MEMBASE_REG;
12434 return OP_AMD64_AND_MEMBASE_REG;
12436 return OP_AMD64_OR_MEMBASE_REG;
12438 return OP_AMD64_XOR_MEMBASE_REG;
12441 return OP_AMD64_ADD_MEMBASE_IMM;
12444 return OP_AMD64_SUB_MEMBASE_IMM;
12447 return OP_AMD64_AND_MEMBASE_IMM;
12450 return OP_AMD64_OR_MEMBASE_IMM;
12453 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode whose single byte result is immediately
 * stored into memory into an x86/amd64 SETcc-to-memory form.  Only valid
 * when the store is a 1 byte store.
 */
12463 op_to_op_store_membase (int store_opcode, int opcode)
12465 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETE directly into the destination byte. */
12468 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12469 return OP_X86_SETEQ_MEMBASE;
/* SETNE directly into the destination byte. */
12471 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12472 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an instruction whose first source operand (sreg1) is produced by a
 * load with LOAD_OPCODE, return the x86/amd64 form which reads that operand
 * straight from memory, folding away the separate load.  Falls through when
 * the load size or the opcode cannot be fused.
 */
12480 op_to_op_src1_membase (int load_opcode, int opcode)
12483 /* FIXME: This has sign extension issues */
/* Special case: byte compare against an immediate. */
12485 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12486 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only full-width (pointer/4-byte) loads can be fused on x86. */
12489 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12494 return OP_X86_PUSH_MEMBASE;
12495 case OP_COMPARE_IMM:
12496 case OP_ICOMPARE_IMM:
12497 return OP_X86_COMPARE_MEMBASE_IMM;
12500 return OP_X86_COMPARE_MEMBASE_REG;
12504 #ifdef TARGET_AMD64
12505 /* FIXME: This has sign extension issues */
12507 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12508 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) pointers are 4 bytes, so 8-byte loads are excluded. */
12513 #ifdef __mono_ilp32__
12514 if (load_opcode == OP_LOADI8_MEMBASE)
12516 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12518 return OP_X86_PUSH_MEMBASE;
/* Disabled fusion, kept for reference: */
12520 /* FIXME: This only works for 32 bit immediates
12521 case OP_COMPARE_IMM:
12522 case OP_LCOMPARE_IMM:
12523 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12524 return OP_AMD64_COMPARE_MEMBASE_IMM;
12526 case OP_ICOMPARE_IMM:
12527 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12528 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Register compares: pick 32 vs 64 bit form based on the load width. */
12532 #ifdef __mono_ilp32__
12533 if (load_opcode == OP_LOAD_MEMBASE)
12534 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12535 if (load_opcode == OP_LOADI8_MEMBASE)
12537 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12539 return OP_AMD64_COMPARE_MEMBASE_REG;
12542 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12543 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same idea as op_to_op_src1_membase (), but for an instruction whose
 * SECOND source operand (sreg2) comes from a load: return the x86/amd64
 * reg-memory form which reads sreg2 straight from memory.
 */
12552 op_to_op_src2_membase (int load_opcode, int opcode)
/* On x86 only pointer/4-byte loads can be fused. */
12555 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12561 return OP_X86_COMPARE_REG_MEMBASE;
12563 return OP_X86_ADD_REG_MEMBASE;
12565 return OP_X86_SUB_REG_MEMBASE;
12567 return OP_X86_AND_REG_MEMBASE;
12569 return OP_X86_OR_REG_MEMBASE;
12571 return OP_X86_XOR_REG_MEMBASE;
12575 #ifdef TARGET_AMD64
/* Under ILP32 (x32) OP_LOAD_MEMBASE is 4 bytes wide, so it joins the
 * 32 bit group; on regular amd64 it joins the 64 bit group below. */
12576 #ifdef __mono_ilp32__
12577 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12579 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit forms (reusing the OP_X86_* opcodes). */
12583 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12585 return OP_X86_ADD_REG_MEMBASE;
12587 return OP_X86_SUB_REG_MEMBASE;
12589 return OP_X86_AND_REG_MEMBASE;
12591 return OP_X86_OR_REG_MEMBASE;
12593 return OP_X86_XOR_REG_MEMBASE;
12595 #ifdef __mono_ilp32__
12596 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12598 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit forms. */
12603 return OP_AMD64_COMPARE_REG_MEMBASE;
12605 return OP_AMD64_ADD_REG_MEMBASE;
12607 return OP_AMD64_SUB_REG_MEMBASE;
12609 return OP_AMD64_AND_REG_MEMBASE;
12611 return OP_AMD64_OR_REG_MEMBASE;
12613 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes which are
 * emulated in software on the current target (long shifts on 32 bit
 * targets, mul/div/rem when the arch emulates them), since those have no
 * real immediate machine form.
 */
12622 mono_op_to_op_imm_noemul (int opcode)
/* Long shifts are emulated on 32 bit targets unless the arch opts out. */
12625 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
/* Division/remainder emulated in software. */
12631 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* Multiplication emulated in software. */
12638 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else falls through to the normal conversion. */
12643 return mono_op_to_op_imm (opcode);
12648 * mono_handle_global_vregs:
12650 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass over the whole IR: any virtual register referenced from more than one
 * basic block gets a MonoInst variable allocated for it (so later passes can
 * spill it), and variables confined to a single bblock are converted back to
 * plain local vregs.  Finally the varinfo/vars tables are compacted.
 */
12654 mono_handle_global_vregs (MonoCompile *cfg)
12656 gint32 *vreg_to_bb;
12657 MonoBasicBlock *bb;
/* Per-vreg table: 0 = unseen, block_num + 1 = seen in exactly one bblock,
 * -1 = seen in more than one bblock.
 * NOTE(review): the element type is gint32 but the allocation uses
 * sizeof (gint32*), and the "+ 1" binds outside the multiply; on 64 bit this
 * merely over-allocates, but presumably "sizeof (gint32) * (cfg->next_vreg + 1)"
 * was intended — confirm against upstream. */
12660 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12662 #ifdef MONO_ARCH_SIMD_INTRINSICS
12663 if (cfg->uses_simd_intrinsics)
12664 mono_simd_simplify_indirection (cfg);
12667 /* Find local vregs used in more than one bb */
12668 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12669 MonoInst *ins = bb->code;
12670 int block_num = bb->block_num;
12672 if (cfg->verbose_level > 2)
12673 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12676 for (; ins; ins = ins->next) {
12677 const char *spec = INS_INFO (ins->opcode);
12678 int regtype = 0, regindex;
12681 if (G_UNLIKELY (cfg->verbose_level > 2))
12682 mono_print_ins (ins);
/* CIL-level opcodes must have been lowered away by this point. */
12684 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2 and sreg3 of the instruction in turn;
 * a ' ' in the ins spec means the slot is unused. */
12686 for (regindex = 0; regindex < 4; regindex ++) {
12689 if (regindex == 0) {
12690 regtype = spec [MONO_INST_DEST];
12691 if (regtype == ' ')
12694 } else if (regindex == 1) {
12695 regtype = spec [MONO_INST_SRC1];
12696 if (regtype == ' ')
12699 } else if (regindex == 2) {
12700 regtype = spec [MONO_INST_SRC2];
12701 if (regtype == ' ')
12704 } else if (regindex == 3) {
12705 regtype = spec [MONO_INST_SRC3];
12706 if (regtype == ' ')
12711 #if SIZEOF_REGISTER == 4
12712 /* In the LLVM case, the long opcodes are not decomposed */
12713 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12715 * Since some instructions reference the original long vreg,
12716 * and some reference the two component vregs, it is quite hard
12717 * to determine when it needs to be global. So be conservative.
12719 if (!get_vreg_to_inst (cfg, vreg)) {
12720 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12722 if (cfg->verbose_level > 2)
12723 printf ("LONG VREG R%d made global.\n", vreg);
12727 * Make the component vregs volatile since the optimizations can
12728 * get confused otherwise.
12730 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12731 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12735 g_assert (vreg != -1);
12737 prev_bb = vreg_to_bb [vreg];
12738 if (prev_bb == 0) {
12739 /* 0 is a valid block num */
12740 vreg_to_bb [vreg] = block_num + 1;
12741 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are implicitly global; skip them. */
12742 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12745 if (!get_vreg_to_inst (cfg, vreg)) {
12746 if (G_UNLIKELY (cfg->verbose_level > 2))
/* NOTE(review): vreg_to_bb [vreg] holds the previous block_num + 1
 * here, so the first BB number printed is off by one — confirm. */
12747 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Allocate a variable of the right storage type for the vreg. */
12751 if (vreg_is_ref (cfg, vreg))
12752 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12754 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12757 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12760 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12763 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12766 g_assert_not_reached ();
12770 /* Flag as having been used in more than one bb */
12771 vreg_to_bb [vreg] = -1;
12777 /* If a variable is used in only one bblock, convert it into a local vreg */
12778 for (i = 0; i < cfg->num_varinfo; i++) {
12779 MonoInst *var = cfg->varinfo [i];
12780 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12782 switch (var->type) {
12788 #if SIZEOF_REGISTER == 8
12791 #if !defined(TARGET_X86)
12792 /* Enabling this screws up the fp stack on x86 */
12795 if (mono_arch_is_soft_float ())
12798 /* Arguments are implicitly global */
12799 /* Putting R4 vars into registers doesn't work currently */
12800 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12801 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12803 * Make that the variable's liveness interval doesn't contain a call, since
12804 * that would cause the lvreg to be spilled, making the whole optimization
12807 /* This is too slow for JIT compilation */
12809 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12811 int def_index, call_index, ins_index;
12812 gboolean spilled = FALSE;
12817 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12818 const char *spec = INS_INFO (ins->opcode);
12820 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12821 def_index = ins_index;
/* NOTE(review): both clauses below test SRC1/sreg1; the second clause
 * presumably should test SRC2/sreg2 — verify against upstream mono. */
12823 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12824 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12825 if (call_index > def_index) {
12831 if (MONO_IS_CALL (ins))
12832 call_index = ins_index;
12842 if (G_UNLIKELY (cfg->verbose_level > 2))
12843 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote the variable back to a plain local vreg. */
12844 var->flags |= MONO_INST_IS_DEAD;
12845 cfg->vreg_to_inst [var->dreg] = NULL;
12852 * Compress the varinfo and vars tables so the liveness computation is faster and
12853 * takes up less space.
12856 for (i = 0; i < cfg->num_varinfo; ++i) {
12857 MonoInst *var = cfg->varinfo [i];
12858 if (pos < i && cfg->locals_start == i)
12859 cfg->locals_start = pos;
12860 if (!(var->flags & MONO_INST_IS_DEAD)) {
12862 cfg->varinfo [pos] = cfg->varinfo [i];
12863 cfg->varinfo [pos]->inst_c0 = pos;
12864 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12865 cfg->vars [pos].idx = pos;
12866 #if SIZEOF_REGISTER == 4
12867 if (cfg->varinfo [pos]->type == STACK_I8) {
12868 /* Modify the two component vars too */
12871 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12872 var1->inst_c0 = pos;
12873 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12874 var1->inst_c0 = pos;
/* Keep locals_start inside the compacted table. */
12881 cfg->num_varinfo = pos;
12882 if (cfg->locals_start > cfg->num_varinfo)
12883 cfg->locals_start = cfg->num_varinfo;
12887 * mono_spill_global_vars:
12889 * Generate spill code for variables which are not allocated to registers,
12890 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12891 * code is generated which could be optimized by the local optimization passes.
12894 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12896 MonoBasicBlock *bb;
12898 int orig_next_vreg;
12899 guint32 *vreg_to_lvreg;
12901 guint32 i, lvregs_len;
12902 gboolean dest_has_lvreg = FALSE;
12903 guint32 stacktypes [128];
12904 MonoInst **live_range_start, **live_range_end;
12905 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12906 int *gsharedvt_vreg_to_idx = NULL;
12908 *need_local_opts = FALSE;
12910 memset (spec2, 0, sizeof (spec2));
12912 /* FIXME: Move this function to mini.c */
12913 stacktypes ['i'] = STACK_PTR;
12914 stacktypes ['l'] = STACK_I8;
12915 stacktypes ['f'] = STACK_R8;
12916 #ifdef MONO_ARCH_SIMD_INTRINSICS
12917 stacktypes ['x'] = STACK_VTYPE;
12920 #if SIZEOF_REGISTER == 4
12921 /* Create MonoInsts for longs */
12922 for (i = 0; i < cfg->num_varinfo; i++) {
12923 MonoInst *ins = cfg->varinfo [i];
12925 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12926 switch (ins->type) {
12931 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12934 g_assert (ins->opcode == OP_REGOFFSET);
12936 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12938 tree->opcode = OP_REGOFFSET;
12939 tree->inst_basereg = ins->inst_basereg;
12940 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12942 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12944 tree->opcode = OP_REGOFFSET;
12945 tree->inst_basereg = ins->inst_basereg;
12946 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12956 if (cfg->compute_gc_maps) {
12957 /* registers need liveness info even for !non refs */
12958 for (i = 0; i < cfg->num_varinfo; i++) {
12959 MonoInst *ins = cfg->varinfo [i];
12961 if (ins->opcode == OP_REGVAR)
12962 ins->flags |= MONO_INST_GC_TRACK;
12966 if (cfg->gsharedvt) {
12967 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
12969 for (i = 0; i < cfg->num_varinfo; ++i) {
12970 MonoInst *ins = cfg->varinfo [i];
12973 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12974 if (i >= cfg->locals_start) {
12976 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12977 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12978 ins->opcode = OP_GSHAREDVT_LOCAL;
12979 ins->inst_imm = idx;
12982 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12983 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12989 /* FIXME: widening and truncation */
12992 * As an optimization, when a variable allocated to the stack is first loaded into
12993 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12994 * the variable again.
12996 orig_next_vreg = cfg->next_vreg;
12997 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12998 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13002 * These arrays contain the first and last instructions accessing a given
13004 * Since we emit bblocks in the same order we process them here, and we
13005 * don't split live ranges, these will precisely describe the live range of
13006 * the variable, i.e. the instruction range where a valid value can be found
13007 * in the variables location.
13008 * The live range is computed using the liveness info computed by the liveness pass.
13009 * We can't use vmv->range, since that is an abstract live range, and we need
13010 * one which is instruction precise.
13011 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13013 /* FIXME: Only do this if debugging info is requested */
13014 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13015 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13016 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13017 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13019 /* Add spill loads/stores */
13020 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13023 if (cfg->verbose_level > 2)
13024 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13026 /* Clear vreg_to_lvreg array */
13027 for (i = 0; i < lvregs_len; i++)
13028 vreg_to_lvreg [lvregs [i]] = 0;
13032 MONO_BB_FOR_EACH_INS (bb, ins) {
13033 const char *spec = INS_INFO (ins->opcode);
13034 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13035 gboolean store, no_lvreg;
13036 int sregs [MONO_MAX_SRC_REGS];
13038 if (G_UNLIKELY (cfg->verbose_level > 2))
13039 mono_print_ins (ins);
13041 if (ins->opcode == OP_NOP)
13045 * We handle LDADDR here as well, since it can only be decomposed
13046 * when variable addresses are known.
13048 if (ins->opcode == OP_LDADDR) {
13049 MonoInst *var = ins->inst_p0;
13051 if (var->opcode == OP_VTARG_ADDR) {
13052 /* Happens on SPARC/S390 where vtypes are passed by reference */
13053 MonoInst *vtaddr = var->inst_left;
13054 if (vtaddr->opcode == OP_REGVAR) {
13055 ins->opcode = OP_MOVE;
13056 ins->sreg1 = vtaddr->dreg;
13058 else if (var->inst_left->opcode == OP_REGOFFSET) {
13059 ins->opcode = OP_LOAD_MEMBASE;
13060 ins->inst_basereg = vtaddr->inst_basereg;
13061 ins->inst_offset = vtaddr->inst_offset;
13064 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13065 /* gsharedvt arg passed by ref */
13066 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13068 ins->opcode = OP_LOAD_MEMBASE;
13069 ins->inst_basereg = var->inst_basereg;
13070 ins->inst_offset = var->inst_offset;
13071 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13072 MonoInst *load, *load2, *load3;
13073 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13074 int reg1, reg2, reg3;
13075 MonoInst *info_var = cfg->gsharedvt_info_var;
13076 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13080 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13083 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13085 g_assert (info_var);
13086 g_assert (locals_var);
13088 /* Mark the instruction used to compute the locals var as used */
13089 cfg->gsharedvt_locals_var_ins = NULL;
13091 /* Load the offset */
13092 if (info_var->opcode == OP_REGOFFSET) {
13093 reg1 = alloc_ireg (cfg);
13094 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13095 } else if (info_var->opcode == OP_REGVAR) {
13097 reg1 = info_var->dreg;
13099 g_assert_not_reached ();
13101 reg2 = alloc_ireg (cfg);
13102 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13103 /* Load the locals area address */
13104 reg3 = alloc_ireg (cfg);
13105 if (locals_var->opcode == OP_REGOFFSET) {
13106 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13107 } else if (locals_var->opcode == OP_REGVAR) {
13108 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13110 g_assert_not_reached ();
13112 /* Compute the address */
13113 ins->opcode = OP_PADD;
13117 mono_bblock_insert_before_ins (bb, ins, load3);
13118 mono_bblock_insert_before_ins (bb, load3, load2);
13120 mono_bblock_insert_before_ins (bb, load2, load);
13122 g_assert (var->opcode == OP_REGOFFSET);
13124 ins->opcode = OP_ADD_IMM;
13125 ins->sreg1 = var->inst_basereg;
13126 ins->inst_imm = var->inst_offset;
13129 *need_local_opts = TRUE;
13130 spec = INS_INFO (ins->opcode);
13133 if (ins->opcode < MONO_CEE_LAST) {
13134 mono_print_ins (ins);
13135 g_assert_not_reached ();
13139 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13143 if (MONO_IS_STORE_MEMBASE (ins)) {
13144 tmp_reg = ins->dreg;
13145 ins->dreg = ins->sreg2;
13146 ins->sreg2 = tmp_reg;
13149 spec2 [MONO_INST_DEST] = ' ';
13150 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13151 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13152 spec2 [MONO_INST_SRC3] = ' ';
13154 } else if (MONO_IS_STORE_MEMINDEX (ins))
13155 g_assert_not_reached ();
13160 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13161 printf ("\t %.3s %d", spec, ins->dreg);
13162 num_sregs = mono_inst_get_src_registers (ins, sregs);
13163 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13164 printf (" %d", sregs [srcindex]);
13171 regtype = spec [MONO_INST_DEST];
13172 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13175 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13176 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13177 MonoInst *store_ins;
13179 MonoInst *def_ins = ins;
13180 int dreg = ins->dreg; /* The original vreg */
13182 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13184 if (var->opcode == OP_REGVAR) {
13185 ins->dreg = var->dreg;
13186 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13188 * Instead of emitting a load+store, use a _membase opcode.
13190 g_assert (var->opcode == OP_REGOFFSET);
13191 if (ins->opcode == OP_MOVE) {
13195 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13196 ins->inst_basereg = var->inst_basereg;
13197 ins->inst_offset = var->inst_offset;
13200 spec = INS_INFO (ins->opcode);
13204 g_assert (var->opcode == OP_REGOFFSET);
13206 prev_dreg = ins->dreg;
13208 /* Invalidate any previous lvreg for this vreg */
13209 vreg_to_lvreg [ins->dreg] = 0;
13213 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13215 store_opcode = OP_STOREI8_MEMBASE_REG;
13218 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13220 #if SIZEOF_REGISTER != 8
13221 if (regtype == 'l') {
13222 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13223 mono_bblock_insert_after_ins (bb, ins, store_ins);
13224 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13225 mono_bblock_insert_after_ins (bb, ins, store_ins);
13226 def_ins = store_ins;
13231 g_assert (store_opcode != OP_STOREV_MEMBASE);
13233 /* Try to fuse the store into the instruction itself */
13234 /* FIXME: Add more instructions */
13235 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13236 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13237 ins->inst_imm = ins->inst_c0;
13238 ins->inst_destbasereg = var->inst_basereg;
13239 ins->inst_offset = var->inst_offset;
13240 spec = INS_INFO (ins->opcode);
13241 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13242 ins->opcode = store_opcode;
13243 ins->inst_destbasereg = var->inst_basereg;
13244 ins->inst_offset = var->inst_offset;
13248 tmp_reg = ins->dreg;
13249 ins->dreg = ins->sreg2;
13250 ins->sreg2 = tmp_reg;
13253 spec2 [MONO_INST_DEST] = ' ';
13254 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13255 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13256 spec2 [MONO_INST_SRC3] = ' ';
13258 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13259 // FIXME: The backends expect the base reg to be in inst_basereg
13260 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13262 ins->inst_basereg = var->inst_basereg;
13263 ins->inst_offset = var->inst_offset;
13264 spec = INS_INFO (ins->opcode);
13266 /* printf ("INS: "); mono_print_ins (ins); */
13267 /* Create a store instruction */
13268 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13270 /* Insert it after the instruction */
13271 mono_bblock_insert_after_ins (bb, ins, store_ins);
13273 def_ins = store_ins;
13276 * We can't assign ins->dreg to var->dreg here, since the
13277 * sregs could use it. So set a flag, and do it after
13280 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13281 dest_has_lvreg = TRUE;
13286 if (def_ins && !live_range_start [dreg]) {
13287 live_range_start [dreg] = def_ins;
13288 live_range_start_bb [dreg] = bb;
13291 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13294 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13295 tmp->inst_c1 = dreg;
13296 mono_bblock_insert_after_ins (bb, def_ins, tmp);
13303 num_sregs = mono_inst_get_src_registers (ins, sregs);
13304 for (srcindex = 0; srcindex < 3; ++srcindex) {
13305 regtype = spec [MONO_INST_SRC1 + srcindex];
13306 sreg = sregs [srcindex];
13308 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13309 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13310 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13311 MonoInst *use_ins = ins;
13312 MonoInst *load_ins;
13313 guint32 load_opcode;
13315 if (var->opcode == OP_REGVAR) {
13316 sregs [srcindex] = var->dreg;
13317 //mono_inst_set_src_registers (ins, sregs);
13318 live_range_end [sreg] = use_ins;
13319 live_range_end_bb [sreg] = bb;
13321 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13324 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13325 /* var->dreg is a hreg */
13326 tmp->inst_c1 = sreg;
13327 mono_bblock_insert_after_ins (bb, ins, tmp);
13333 g_assert (var->opcode == OP_REGOFFSET);
13335 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13337 g_assert (load_opcode != OP_LOADV_MEMBASE);
13339 if (vreg_to_lvreg [sreg]) {
13340 g_assert (vreg_to_lvreg [sreg] != -1);
13342 /* The variable is already loaded to an lvreg */
13343 if (G_UNLIKELY (cfg->verbose_level > 2))
13344 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13345 sregs [srcindex] = vreg_to_lvreg [sreg];
13346 //mono_inst_set_src_registers (ins, sregs);
13350 /* Try to fuse the load into the instruction */
13351 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13352 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13353 sregs [0] = var->inst_basereg;
13354 //mono_inst_set_src_registers (ins, sregs);
13355 ins->inst_offset = var->inst_offset;
13356 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13357 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13358 sregs [1] = var->inst_basereg;
13359 //mono_inst_set_src_registers (ins, sregs);
13360 ins->inst_offset = var->inst_offset;
13362 if (MONO_IS_REAL_MOVE (ins)) {
13363 ins->opcode = OP_NOP;
13366 //printf ("%d ", srcindex); mono_print_ins (ins);
13368 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13370 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13371 if (var->dreg == prev_dreg) {
13373 * sreg refers to the value loaded by the load
13374 * emitted below, but we need to use ins->dreg
13375 * since it refers to the store emitted earlier.
13379 g_assert (sreg != -1);
13380 vreg_to_lvreg [var->dreg] = sreg;
13381 g_assert (lvregs_len < 1024);
13382 lvregs [lvregs_len ++] = var->dreg;
13386 sregs [srcindex] = sreg;
13387 //mono_inst_set_src_registers (ins, sregs);
13389 #if SIZEOF_REGISTER != 8
13390 if (regtype == 'l') {
13391 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13392 mono_bblock_insert_before_ins (bb, ins, load_ins);
13393 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13394 mono_bblock_insert_before_ins (bb, ins, load_ins);
13395 use_ins = load_ins;
13400 #if SIZEOF_REGISTER == 4
13401 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13403 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13404 mono_bblock_insert_before_ins (bb, ins, load_ins);
13405 use_ins = load_ins;
13409 if (var->dreg < orig_next_vreg) {
13410 live_range_end [var->dreg] = use_ins;
13411 live_range_end_bb [var->dreg] = bb;
13414 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13417 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13418 tmp->inst_c1 = var->dreg;
13419 mono_bblock_insert_after_ins (bb, ins, tmp);
13423 mono_inst_set_src_registers (ins, sregs);
13425 if (dest_has_lvreg) {
13426 g_assert (ins->dreg != -1);
13427 vreg_to_lvreg [prev_dreg] = ins->dreg;
13428 g_assert (lvregs_len < 1024);
13429 lvregs [lvregs_len ++] = prev_dreg;
13430 dest_has_lvreg = FALSE;
13434 tmp_reg = ins->dreg;
13435 ins->dreg = ins->sreg2;
13436 ins->sreg2 = tmp_reg;
13439 if (MONO_IS_CALL (ins)) {
13440 /* Clear vreg_to_lvreg array */
13441 for (i = 0; i < lvregs_len; i++)
13442 vreg_to_lvreg [lvregs [i]] = 0;
13444 } else if (ins->opcode == OP_NOP) {
13446 MONO_INST_NULLIFY_SREGS (ins);
13449 if (cfg->verbose_level > 2)
13450 mono_print_ins_index (1, ins);
13453 /* Extend the live range based on the liveness info */
13454 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13455 for (i = 0; i < cfg->num_varinfo; i ++) {
13456 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13458 if (vreg_is_volatile (cfg, vi->vreg))
13459 /* The liveness info is incomplete */
13462 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13463 /* Live from at least the first ins of this bb */
13464 live_range_start [vi->vreg] = bb->code;
13465 live_range_start_bb [vi->vreg] = bb;
13468 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13469 /* Live at least until the last ins of this bb */
13470 live_range_end [vi->vreg] = bb->last_ins;
13471 live_range_end_bb [vi->vreg] = bb;
13477 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13479 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13480 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13482 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13483 for (i = 0; i < cfg->num_varinfo; ++i) {
13484 int vreg = MONO_VARINFO (cfg, i)->vreg;
13487 if (live_range_start [vreg]) {
13488 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13490 ins->inst_c1 = vreg;
13491 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13493 if (live_range_end [vreg]) {
13494 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13496 ins->inst_c1 = vreg;
13497 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13498 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13500 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13506 if (cfg->gsharedvt_locals_var_ins) {
13507 /* Nullify if unused */
13508 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13509 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13512 g_free (live_range_start);
13513 g_free (live_range_end);
13514 g_free (live_range_start_bb);
13515 g_free (live_range_end_bb);
13520 * - use 'iadd' instead of 'int_add'
13521 * - handling ovf opcodes: decompose in method_to_ir.
13522 * - unify iregs/fregs
13523 * -> partly done, the missing parts are:
13524 * - a more complete unification would involve unifying the hregs as well, so
13525 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13526 * would no longer map to the machine hregs, so the code generators would need to
13527 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13528 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13529 * fp/non-fp branches speeds it up by about 15%.
13530 * - use sext/zext opcodes instead of shifts
13532 * - get rid of TEMPLOADs if possible and use vregs instead
13533 * - clean up usage of OP_P/OP_ opcodes
13534 * - cleanup usage of DUMMY_USE
13535 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13537 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13538 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13539 * - make sure handle_stack_args () is called before the branch is emitted
13540 * - when the new IR is done, get rid of all unused stuff
13541 * - COMPARE/BEQ as separate instructions or unify them ?
13542 * - keeping them separate allows specialized compare instructions like
13543 * compare_imm, compare_membase
13544 * - most back ends unify fp compare+branch, fp compare+ceq
13545 * - integrate mono_save_args into inline_method
13546  * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13547 * - handle long shift opts on 32 bit platforms somehow: they require
13548 * 3 sregs (2 for arg1 and 1 for arg2)
13549 * - make byref a 'normal' type.
13550 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13551 * variable if needed.
13552 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13553 * like inline_method.
13554 * - remove inlining restrictions
13555 * - fix LNEG and enable cfold of INEG
13556 * - generalize x86 optimizations like ldelema as a peephole optimization
13557 * - add store_mem_imm for amd64
13558 * - optimize the loading of the interruption flag in the managed->native wrappers
13559 * - avoid special handling of OP_NOP in passes
13560 * - move code inserting instructions into one function/macro.
13561 * - try a coalescing phase after liveness analysis
13562 * - add float -> vreg conversion + local optimizations on !x86
13563 * - figure out how to handle decomposed branches during optimizations, ie.
13564 * compare+branch, op_jump_table+op_br etc.
13565 * - promote RuntimeXHandles to vregs
13566 * - vtype cleanups:
13567 * - add a NEW_VARLOADA_VREG macro
13568 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13569 * accessing vtype fields.
13570 * - get rid of I8CONST on 64 bit platforms
13571 * - dealing with the increase in code size due to branches created during opcode
13573 * - use extended basic blocks
13574 * - all parts of the JIT
13575 * - handle_global_vregs () && local regalloc
13576 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13577 * - sources of increase in code size:
13580 * - isinst and castclass
13581 * - lvregs not allocated to global registers even if used multiple times
13582 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13584 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13585 * - add all micro optimizations from the old JIT
13586 * - put tree optimizations into the deadce pass
13587 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13588 * specific function.
13589 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13590 * fcompare + branchCC.
13591 * - create a helper function for allocating a stack slot, taking into account
13592 * MONO_CFG_HAS_SPILLUP.
13594 * - merge the ia64 switch changes.
13595 * - optimize mono_regstate2_alloc_int/float.
13596 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13597 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13598 * parts of the tree could be separated by other instructions, killing the tree
13599 * arguments, or stores killing loads etc. Also, should we fold loads into other
13600 * instructions if the result of the load is used multiple times ?
13601 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13602 * - LAST MERGE: 108395.
13603 * - when returning vtypes in registers, generate IR and append it to the end of the
13604 * last bb instead of doing it in the epilog.
13605 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13613 - When to decompose opcodes:
13614 - earlier: this makes some optimizations hard to implement, since the low level IR
13615    no longer contains the necessary information. But it is easier to do.
13616 - later: harder to implement, enables more optimizations.
13617 - Branches inside bblocks:
13618 - created when decomposing complex opcodes.
13619 - branches to another bblock: harmless, but not tracked by the branch
13620 optimizations, so need to branch to a label at the start of the bblock.
13621 - branches to inside the same bblock: very problematic, trips up the local
13622     reg allocator. Can be fixed by splitting the current bblock, but that is a
13623 complex operation, since some local vregs can become global vregs etc.
13624 - Local/global vregs:
13625 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13626 local register allocator.
13627 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13628 structure, created by mono_create_var (). Assigned to hregs or the stack by
13629 the global register allocator.
13630 - When to do optimizations like alu->alu_imm:
13631 - earlier -> saves work later on since the IR will be smaller/simpler
13632 - later -> can work on more instructions
13633 - Handling of valuetypes:
13634 - When a vtype is pushed on the stack, a new temporary is created, an
13635 instruction computing its address (LDADDR) is emitted and pushed on
13636 the stack. Need to optimize cases when the vtype is used immediately as in
13637 argument passing, stloc etc.
13638 - Instead of the to_end stuff in the old JIT, simply call the function handling
13639 the values on the stack before emitting the last instruction of the bb.
13642 #endif /* DISABLE_JIT */