2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Inliner tuning constants (heuristic cost/size knobs). */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/*
 * INLINE_FAILURE: give up inlining the current callee and fall back to a
 * plain call.  Only reports (at verbose_level >= 2) when actually inlining
 * (cfg->method != method) and the method is not a wrapper.
 * CHECK_CFG_EXCEPTION (below) bails out of IR generation if an exception
 * has already been recorded on the cfg.
 */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * METHOD_ACCESS_FAILURE / FIELD_ACCESS_FAILURE: record a MethodAccess /
 * FieldAccess exception on the cfg, with a human-readable message built
 * from the full names involved, then jump to the exception_exit label.
 * The temporary name strings are freed before leaving.
 */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/*
 * GENERIC_SHARING_FAILURE / GSHAREDVT_FAILURE: the method cannot be
 * compiled with generic sharing (resp. gsharedvt) for this opcode;
 * record a GENERIC_SHARING_FAILED exception on the cfg (so the caller
 * can fall back to a normal compile) and jump to exception_exit.
 */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* OUT_OF_MEMORY_FAILURE: record an OOM exception on the cfg and bail out. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
/* DISABLE_AOT: mark the method as not AOT-compilable, logging the location. */
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
130 /* Determine whether 'ins' represents a load of the 'this' argument */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
/*
 * Cached icall signatures for calls into the runtime trampolines.
 * Created once by mono_create_helper_signatures () below.
 */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
/*
 * MINI_OP/MINI_OP3 are redefined before each inclusion of mini-ops.h to
 * extract a different per-opcode column from the opcode table: first the
 * dreg/sreg register specifiers, then the source-register counts.
 */
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
/* MONO_INIT_VARINFO: initialize a variable's liveness-range bookkeeping. */
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source registers from REGS into INS. */
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
/* Thin public wrappers around the internal vreg allocators
 * (int / float / pointer-sized / typed-destination registers). */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the opcode used to move a value of that type between
 * registers.  Enums are unwrapped to their underlying integral type and
 * generic instances to their container class before dispatching.
 * (Several case bodies are outside this view — sampled dump.)
 */
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_replace_type (type);
275 switch (type->type) {
278 case MONO_TYPE_BOOLEAN:
290 case MONO_TYPE_FNPTR:
292 case MONO_TYPE_CLASS:
293 case MONO_TYPE_STRING:
294 case MONO_TYPE_OBJECT:
295 case MONO_TYPE_SZARRAY:
296 case MONO_TYPE_ARRAY:
300 #if SIZEOF_REGISTER == 8
309 case MONO_TYPE_VALUETYPE:
/* enums move like their underlying integral type */
310 if (type->data.klass->enumtype) {
311 type = mono_class_enum_basetype (type->data.klass);
314 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
317 case MONO_TYPE_TYPEDBYREF:
319 case MONO_TYPE_GENERICINST:
320 type = &type->data.generic_class->container_class->byval_arg;
324 g_assert (cfg->generic_sharing_context);
325 if (mini_type_var_is_vt (cfg, type))
330 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print a basic block's in/out edges and its instruction list. */
336 mono_print_bb (MonoBasicBlock *bb, const char *msg)
341 printf ("\n%s %d: [IN: ", msg, bb->block_num);
342 for (i = 0; i < bb->in_count; ++i)
343 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
345 for (i = 0; i < bb->out_count; ++i)
346 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
348 for (tree = bb->code; tree; tree = tree->next)
349 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Create the cached icall signatures (declared above) used when calling
 * the runtime trampolines.  Signature strings are "<ret-type> <arg-types...>".
 */
353 mono_create_helper_signatures (void)
355 helper_sig_domain_get = mono_create_icall_signature ("ptr");
356 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
357 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
358 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
359 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
360 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
361 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
365 * When using gsharedvt, some instantiations might be verifiable, and some might be not. i.e.
366 * foo<T> (int i) { ldarg.0; box T; }
/*
 * UNVERIFIED: the IL failed verification.  Under gsharedvt this records a
 * sharing failure so a normal instantiation is compiled instead; otherwise
 * the break_on_unverified debug option can trap into the debugger.
 */
368 #define UNVERIFIED do { \
369 if (cfg->gsharedvt) { \
370 if (cfg->verbose_level > 2) \
371 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
372 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
373 goto exception_exit; \
375 if (mini_get_debug_options ()->break_on_unverified) \
/* LOAD_ERROR / TYPE_LOAD_ERROR: member or type resolution failed; bail out
 * via the load_error label (TYPE_LOAD_ERROR also records the klass). */
381 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
383 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/*
 * GET_BBLOCK: look up (or lazily create and register) the basic block
 * starting at IL offset IP, via the cfg->cil_offset_to_bb cache.
 */
385 #define GET_BBLOCK(cfg,tblock,ip) do { \
386 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
388 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
389 NEW_BBLOCK (cfg, (tblock)); \
390 (tblock)->cil_code = (ip); \
391 ADD_BBLOCK (cfg, (tblock)); \
395 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* EMIT_NEW_X86_LEA: emit an x86 LEA (sreg1 + sreg2<<shift + imm) into a
 * freshly allocated managed-pointer vreg in the current bblock. */
396 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
397 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
398 (dest)->dreg = alloc_ireg_mp ((cfg)); \
399 (dest)->sreg1 = (sr1); \
400 (dest)->sreg2 = (sr2); \
401 (dest)->inst_imm = (imm); \
402 (dest)->backend.shift_amount = (shift); \
403 MONO_ADD_INS ((cfg)->cbb, (dest)); \
407 #if SIZEOF_REGISTER == 8
/*
 * ADD_WIDEN_OP: on 64-bit targets, a PTR/I4 binary operation needs the
 * 32-bit operand sign-extended to pointer width first; on 32-bit targets
 * the macro expands to nothing.
 */
408 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
409 /* FIXME: Need to add many more cases */ \
410 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
412 int dr = alloc_preg (cfg); \
413 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
414 (ins)->sreg2 = widen->dreg; \
418 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * ADD_BINOP / ADD_UNOP: pop the operand(s) from the eval stack (sp), build
 * the instruction (type_from_op both sets the result stack type and
 * specializes the opcode), emit it into the current bblock, and push the
 * possibly-decomposed result back onto the stack.
 */
421 #define ADD_BINOP(op) do { \
422 MONO_INST_NEW (cfg, ins, (op)); \
424 ins->sreg1 = sp [0]->dreg; \
425 ins->sreg2 = sp [1]->dreg; \
426 type_from_op (ins, sp [0], sp [1]); \
428 /* Have to insert a widening op */ \
429 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
430 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
431 MONO_ADD_INS ((cfg)->cbb, (ins)); \
432 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
435 #define ADD_UNOP(op) do { \
436 MONO_INST_NEW (cfg, ins, (op)); \
438 ins->sreg1 = sp [0]->dreg; \
439 type_from_op (ins, sp [0], NULL); \
441 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
442 MONO_ADD_INS ((cfg)->cbb, (ins)); \
443 *sp++ = mono_decompose_opcode (cfg, ins); \
/*
 * ADD_BINCOND: emit a compare + conditional-branch pair for a two-operand
 * branch opcode.  Links the current bblock to the branch target and to the
 * fall-through block (next_block when supplied, otherwise the bblock at
 * the next IL offset), flushing any values left on the evaluation stack
 * into interface variables first (handle_stack_args).
 */
446 #define ADD_BINCOND(next_block) do { \
449 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
450 cmp->sreg1 = sp [0]->dreg; \
451 cmp->sreg2 = sp [1]->dreg; \
452 type_from_op (cmp, sp [0], sp [1]); \
454 type_from_op (ins, sp [0], sp [1]); \
455 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
456 GET_BBLOCK (cfg, tblock, target); \
457 link_bblock (cfg, bblock, tblock); \
458 ins->inst_true_bb = tblock; \
459 if ((next_block)) { \
460 link_bblock (cfg, bblock, (next_block)); \
461 ins->inst_false_bb = (next_block); \
462 start_new_bblock = 1; \
464 GET_BBLOCK (cfg, tblock, ip); \
465 link_bblock (cfg, bblock, tblock); \
466 ins->inst_false_bb = tblock; \
467 start_new_bblock = 2; \
469 if (sp != stack_start) { \
470 handle_stack_args (cfg, stack_start, sp - stack_start); \
471 CHECK_UNVERIFIABLE (cfg); \
473 MONO_ADD_INS (bblock, cmp); \
474 MONO_ADD_INS (bblock, ins); \
478 * link_bblock: Links two basic blocks
480 * links two basic blocks in the control flow graph, the 'from'
481 * argument is the starting block and the 'to' argument is the block
482 * that control flow reaches after 'from'.
485 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
487 MonoBasicBlock **newa;
491 if (from->cil_code) {
493 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
495 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
498 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
500 printf ("edge from entry to exit\n");
/* Nothing to do if the edge already exists in from's out list. */
505 for (i = 0; i < from->out_count; ++i) {
506 if (to == from->out_bb [i]) {
/* Grow out_bb by copying into a new mempool array one element larger. */
512 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
513 for (i = 0; i < from->out_count; ++i) {
514 newa [i] = from->out_bb [i];
/* Symmetrically add 'from' to to's in list. */
522 for (i = 0; i < to->in_count; ++i) {
523 if (from == to->in_bb [i]) {
529 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
530 for (i = 0; i < to->in_count; ++i) {
531 newa [i] = to->in_bb [i];
/* Public wrapper around link_bblock (). */
540 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 link_bblock (cfg, from, to);
546 * mono_find_block_region:
548 * We mark each basic block with a region ID. We use that to avoid BB
549 * optimizations when blocks are in different regions.
552 * A region token that encodes where this region is, and information
553 * about the clause owner for this block.
555 * The region encodes the try/catch/filter clause that owns this block
556 * as well as the type. -1 is a special value that represents a block
557 * that is in none of try/catch/filter.
560 mono_find_block_region (MonoCompile *cfg, int offset)
562 MonoMethodHeader *header = cfg->header;
563 MonoExceptionClause *clause;
566 for (i = 0; i < header->num_clauses; ++i) {
567 clause = &header->clauses [i];
/* A filter region covers [filter_offset, handler_offset). */
568 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
569 (offset < (clause->handler_offset)))
570 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler: classify as finally / fault / catch region. */
572 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
573 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
574 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
575 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
576 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
578 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Otherwise being inside the try range marks the block with the clause flags. */
581 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
582 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the clauses of the given TYPE whose try range contains IP but
 * not TARGET, i.e. the handlers that must run when branching from IP to
 * TARGET.
 */
589 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
591 MonoMethodHeader *header = cfg->header;
592 MonoExceptionClause *clause;
596 for (i = 0; i < header->num_clauses; ++i) {
597 clause = &header->clauses [i];
598 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
599 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
600 if (clause->flags == type)
601 res = g_list_append (res, clause);
/* Get or lazily create the stack-pointer save variable for an EH region,
 * cached in cfg->spvars keyed by the region token. */
608 mono_create_spvar_for_region (MonoCompile *cfg, int region)
612 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
616 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
617 /* prevent it from being register allocated */
618 var->flags |= MONO_INST_VOLATILE;
620 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for the handler at OFFSET, if any. */
624 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
626 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception-object variable for the handler at OFFSET. */
630 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
634 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
638 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
639 /* prevent it from being register allocated */
640 var->flags |= MONO_INST_VOLATILE;
642 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
656 type = mini_replace_type (type);
657 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack */
659 inst->type = STACK_MP;
664 switch (type->type) {
666 inst->type = STACK_INV;
670 case MONO_TYPE_BOOLEAN:
676 inst->type = STACK_I4;
681 case MONO_TYPE_FNPTR:
682 inst->type = STACK_PTR;
684 case MONO_TYPE_CLASS:
685 case MONO_TYPE_STRING:
686 case MONO_TYPE_OBJECT:
687 case MONO_TYPE_SZARRAY:
688 case MONO_TYPE_ARRAY:
689 inst->type = STACK_OBJ;
693 inst->type = STACK_I8;
697 inst->type = STACK_R8;
699 case MONO_TYPE_VALUETYPE:
/* enums are treated as their underlying integral type */
700 if (type->data.klass->enumtype) {
701 type = mono_class_enum_basetype (type->data.klass);
705 inst->type = STACK_VTYPE;
708 case MONO_TYPE_TYPEDBYREF:
709 inst->klass = mono_defaults.typed_reference_class;
710 inst->type = STACK_VTYPE;
712 case MONO_TYPE_GENERICINST:
/* unwrap to the generic container class and re-dispatch */
713 type = &type->data.generic_class->container_class->byval_arg;
717 g_assert (cfg->generic_sharing_context);
/* shared type variables: vtype under gsharedvt, otherwise a reference */
718 if (mini_is_gsharedvt_type (cfg, type)) {
719 g_assert (cfg->gsharedvt);
720 inst->type = STACK_VTYPE;
722 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed [src1->type][src2->type]. */
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a negation, indexed by operand stack type. */
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
/* Result type of an integer-only binop (and/or/xor/...), same indexing. */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability of two stack types; 0 = invalid, non-zero values encode
 * verifier sub-cases (see uses in type_from_op below). */
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
/* Result type of a shift, indexed [value type][shift-amount type]. */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
/*
 * type_from_op:
 *
 *   Sets ins->type (the eval-stack type of the result) from the opcode and
 * its operand(s), and specializes the generic opcode into the matching
 * typed opcode via the *_op_map tables above.  Invalid IL yields STACK_INV.
 * (Many case labels are outside this view — sampled dump.)
 */
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
/* compares: choose long/float/int compare from the operand width */
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
/* unsigned compares: only type pairs with the low table bit set are valid */
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: result is always R8 */
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized result; on 64-bit an I8 source is a plain move */
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_ICONV_TO_U;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
/* overflow-checked arithmetic is not defined on floats */
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers carry object_class as a conservative klass */
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
/* Eval-stack result type tables used by the signature checker below. */
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature: verify that the argument values on the eval
 * stack are compatible with SIG (byref-ness, reference vs. value kinds,
 * float kinds).  NOTE: the parameter named 'this' makes this file C-only
 * (it is a reserved word in C++).
 */
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
/* lazily created, then cached on the cfg */
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful when MONO_ARCH_NEED_GOT_VAR is defined and compiling AOT;
 * lazily created and cached on the cfg. */
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
/* Get or lazily create the rgctx/vtable variable (generic sharing only). */
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1169 return cfg->rgctx_var;
/* Map an eval-stack type back to a representative MonoType. */
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its eval-stack type (roughly the inverse of
 * type_from_stack_type above). */
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 t = mono_type_get_underlying_type (t);
1196 case MONO_TYPE_BOOLEAN:
1199 case MONO_TYPE_CHAR:
1206 case MONO_TYPE_FNPTR:
1208 case MONO_TYPE_CLASS:
1209 case MONO_TYPE_STRING:
1210 case MONO_TYPE_OBJECT:
1211 case MONO_TYPE_SZARRAY:
1212 case MONO_TYPE_ARRAY:
1220 case MONO_TYPE_VALUETYPE:
1221 case MONO_TYPE_TYPEDBYREF:
1223 case MONO_TYPE_GENERICINST:
1224 if (mono_type_generic_inst_is_valuetype (t))
1230 g_assert_not_reached ();
/* Element class accessed by an array/indirection opcode (ldelem/stelem etc.). */
1237 array_access_to_klass (int opcode)
1241 return mono_defaults.byte_class;
1243 return mono_defaults.uint16_class;
1246 return mono_defaults.int_class;
1249 return mono_defaults.sbyte_class;
1252 return mono_defaults.int16_class;
1255 return mono_defaults.int32_class;
1257 return mono_defaults.uint32_class;
1260 return mono_defaults.int64_class;
1263 return mono_defaults.single_class;
1266 return mono_defaults.double_class;
1267 case CEE_LDELEM_REF:
1268 case CEE_STELEM_REF:
1269 return mono_defaults.object_class;
1271 g_assert_not_reached ();
1277 * We try to share variables when possible
/*
 * mono_compile_get_interface_var: return a variable to hold the value in
 * stack slot SLOT with the eval-stack type of INS, reusing a cached one
 * (cfg->intvars, keyed by slot and type) when possible.
 */
1280 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1285 /* inlining can result in deeper stacks */
1286 if (slot >= cfg->header->max_stack)
1287 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1289 pos = ins->type - 1 + slot * STACK_MAX;
1291 switch (ins->type) {
1298 if ((vnum = cfg->intvars [pos]))
1299 return cfg->varinfo [vnum];
1300 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1301 cfg->intvars [pos] = res->inst_c0;
1304 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info: remember which image/token a compiled reference
 * came from, so the AOT compiler can resolve it again later.
 */
1310 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1313 * Don't use this if a generic_context is set, since that means AOT can't
1314 * look up the method using just the image+token.
1315 * table == 0 means this is a reference made from a wrapper.
1317 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1318 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1319 jump_info_token->image = image;
1320 jump_info_token->token = token;
1321 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1326 * This function is called to handle items that are left on the evaluation stack
1327 * at basic block boundaries. What happens is that we save the values to local variables
1328 * and we reload them later when first entering the target basic block (with the
1329 * handle_loaded_temps () function).
1330 * A single join point will use the same variables (stored in the array bb->out_stack or
1331 * bb->in_stack, if the basic block is before or after the join point).
1333 * This function needs to be called _before_ emitting the last instruction of
1334 * the bb (i.e. before emitting a branch).
1335 * If the stack merge fails at a join point, cfg->unverifiable is set.
1338 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1341 MonoBasicBlock *bb = cfg->cbb;
1342 MonoBasicBlock *outb;
1343 MonoInst *inst, **locals;
1348 if (cfg->verbose_level > 3)
1349 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1350 if (!bb->out_scount) {
1351 bb->out_scount = count;
1352 //printf ("bblock %d has out:", bb->block_num);
1354 for (i = 0; i < bb->out_count; ++i) {
1355 outb = bb->out_bb [i];
1356 /* exception handlers are linked, but they should not be considered for stack args */
1357 if (outb->flags & BB_EXCEPTION_HANDLER)
1359 //printf (" %d", outb->block_num);
1360 if (outb->in_stack) {
1362 bb->out_stack = outb->in_stack;
1368 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1369 for (i = 0; i < count; ++i) {
1371 * try to reuse temps already allocated for this purpouse, if they occupy the same
1372 * stack slot and if they are of the same type.
1373 * This won't cause conflicts since if 'local' is used to
1374 * store one of the values in the in_stack of a bblock, then
1375 * the same variable will be used for the same outgoing stack
1377 * This doesn't work when inlining methods, since the bblocks
1378 * in the inlined methods do not inherit their in_stack from
1379 * the bblock they are inlined to. See bug #58863 for an
1382 if (cfg->inlined_method)
1383 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1385 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1390 for (i = 0; i < bb->out_count; ++i) {
1391 outb = bb->out_bb [i];
1392 /* exception handlers are linked, but they should not be considered for stack args */
1393 if (outb->flags & BB_EXCEPTION_HANDLER)
1395 if (outb->in_scount) {
1396 if (outb->in_scount != bb->out_scount) {
1397 cfg->unverifiable = TRUE;
1400 continue; /* check they are the same locals */
1402 outb->in_scount = count;
1403 outb->in_stack = bb->out_stack;
1406 locals = bb->out_stack;
1408 for (i = 0; i < count; ++i) {
1409 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1410 inst->cil_code = sp [i]->cil_code;
1411 sp [i] = locals [i];
1412 if (cfg->verbose_level > 3)
1413 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1417 * It is possible that the out bblocks already have in_stack assigned, and
1418 * the in_stacks differ. In this case, we will store to all the different
1425 /* Find a bblock which has a different in_stack */
1427 while (bindex < bb->out_count) {
1428 outb = bb->out_bb [bindex];
1429 /* exception handlers are linked, but they should not be considered for stack args */
1430 if (outb->flags & BB_EXCEPTION_HANDLER) {
1434 if (outb->in_stack != locals) {
1435 for (i = 0; i < count; ++i) {
1436 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1437 inst->cil_code = sp [i]->cil_code;
1438 sp [i] = locals [i];
1439 if (cfg->verbose_level > 3)
1440 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1442 locals = outb->in_stack;
1451 /* Emit code which loads interface_offsets [klass->interface_id]
1452 * The array is stored in memory before vtable.
1455 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1457 if (cfg->compile_aot) {
1458 int ioffset_reg = alloc_preg (cfg);
1459 int iid_reg = alloc_preg (cfg);
/* The interface id is unknown at AOT compile time: load it through a patch. */
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1462 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: index the table at a negative offset from the vtable pointer. */
1466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR which tests bit klass->interface_id of the interface bitmap
 *   located at base_reg + offset, leaving a nonzero value in intf_bit_reg
 *   when the bit is set.
 */
1471 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1473 int ibitmap_reg = alloc_preg (cfg);
1474 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the bit test to an icall. */
1476 MonoInst *res, *ins;
1477 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1478 MONO_ADD_INS (cfg->cbb, ins);
1480 if (cfg->compile_aot)
1481 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1483 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1484 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1487 int ibitmap_byte_reg = alloc_preg (cfg);
1489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1491 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at run time. */
1492 int iid_reg = alloc_preg (cfg);
1493 int shifted_iid_reg = alloc_preg (cfg);
1494 int ibitmap_byte_address_reg = alloc_preg (cfg);
1495 int masked_iid_reg = alloc_preg (cfg);
1496 int iid_one_bit_reg = alloc_preg (cfg);
1497 int iid_bit_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1503 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is a compile-time constant, so byte index and mask are immediates. */
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1514 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1515 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: check the bitmap stored in MonoClass.interface_bitmap. */
1518 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1520 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1524 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1525 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: check the bitmap stored in MonoVTable.interface_bitmap. */
1528 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1530 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1534 * Emit code which checks whether the interface id of @klass is smaller
1535 * than the value given by max_iid_reg.
1538 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1539 MonoBasicBlock *false_target)
1541 if (cfg->compile_aot) {
/* AOT: the interface id is resolved at load time via a patch. */
1542 int iid_reg = alloc_preg (cfg);
1543 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1544 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* On failure: branch to false_target when given, otherwise throw. */
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1551 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1554 /* Same as above, but obtains max_iid from a vtable */
1556 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1557 MonoBasicBlock *false_target)
1559 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id, then delegate to the generic check. */
1561 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1562 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1565 /* Same as above, but obtains max_iid from a klass */
1567 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1568 MonoBasicBlock *false_target)
1570 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id, then delegate to the generic check. */
1572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1573 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subtype test of the class in klass_reg against
 *   KLASS (or the runtime class in klass_ins, when non-NULL): branch to
 *   true_target on a match, to false_target when the candidate's
 *   supertype table is too shallow.
 */
1577 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1579 int idepth_reg = alloc_preg (cfg);
1580 int stypes_reg = alloc_preg (cfg);
1581 int stype = alloc_preg (cfg);
1583 mono_class_setup_supertypes (klass);
/* Deep hierarchies: verify the candidate's supertypes array is long enough. */
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare it against KLASS. */
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1593 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1594 } else if (cfg->compile_aot) {
1595 int const_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper without an explicit runtime class instruction. */
1605 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1607 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface check through a vtable: max-iid range check, then
 *   the interface bitmap test; branch to true_target on success, or raise
 *   InvalidCastException.
 */
1611 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 int intf_reg = alloc_preg (cfg);
1615 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1616 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1621 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1625 * Variant of the above that takes a register to the class, not the vtable.
1628 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1630 int intf_bit_reg = alloc_preg (cfg);
1632 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1633 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Bit set => interface implemented; otherwise throw. */
1636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1638 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check of klass_reg against KLASS (or the
 *   runtime class in klass_inst), throwing InvalidCastException on mismatch.
 */
1642 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1646 } else if (cfg->compile_aot) {
1647 int const_reg = alloc_preg (cfg);
1648 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1653 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper without an explicit runtime class instruction. */
1657 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1659 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare klass_reg against KLASS and branch to TARGET using branch_op
 *   (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing.
 */
1663 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1665 if (cfg->compile_aot) {
1666 int const_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1668 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1676 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check of the class in klass_reg (for the object in
 *   obj_reg) against KLASS, raising InvalidCastException on mismatch.
 *   object_is_null is the block to branch to when the check can be
 *   short-circuited.  obj_reg may be -1 to skip the vector check (used
 *   when recursing for arrays of arrays).
 */
1679 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1682 int rank_reg = alloc_preg (cfg);
1683 int eclass_reg = alloc_preg (cfg);
1685 g_assert (!klass_inst);
/* Array case: ranks must match exactly, then check the element class. */
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1688 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1689 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types related to enums (see also the isinst paths). */
1691 if (klass->cast_class == mono_defaults.object_class) {
1692 int parent_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1694 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1695 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1696 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1697 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1698 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1699 } else if (klass->cast_class == mono_defaults.enum_class) {
1700 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1701 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1702 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1704 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1705 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY target: the object must have no bounds, i.e. be a vector. */
1708 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1709 /* Check that the object is a vector too */
1710 int bounds_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1713 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: walk the supertype table, same layout as the isinst path. */
1716 int idepth_reg = alloc_preg (cfg);
1717 int stypes_reg = alloc_preg (cfg);
1718 int stype = alloc_preg (cfg);
1720 mono_class_setup_supertypes (klass);
1722 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1725 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1727 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1729 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper without an explicit runtime class instruction. */
1734 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1736 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR which fills SIZE bytes at destreg + offset with VAL
 *   (only val == 0 is supported), using the widest stores that ALIGN
 *   permits.
 */
1740 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1744 g_assert (val == 0);
/* Small aligned sizes: a single immediate store of the right width. */
1749 if ((size <= 4) && (size <= align)) {
1752 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1755 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1760 #if SIZEOF_REGISTER == 8
1762 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize VAL in a register and store it repeatedly. */
1768 val_reg = alloc_preg (cfg);
1770 if (SIZEOF_REGISTER == 8)
1771 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1773 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte-wide stores. */
1776 /* This could be optimized further if necessary */
1778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1785 #if !NO_UNALIGNED_ACCESS
1786 if (SIZEOF_REGISTER == 8) {
1788 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Store the remaining tail in progressively narrower chunks. */
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying SIZE bytes from srcreg + soffset to
 *   destreg + doffset, using the widest load/store pairs ALIGN permits.
 */
1818 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1825 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1826 g_assert (size < 10000);
/* Unaligned copy: fall back to byte-wide load/store pairs. */
1829 /* This could be optimized further if necessary */
1831 cur_reg = alloc_preg (cfg);
1832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1840 #if !NO_UNALIGNED_ACCESS
/* 64-bit: copy 8 bytes at a time while possible. */
1841 if (SIZEOF_REGISTER == 8) {
1843 cur_reg = alloc_preg (cfg);
1844 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1845 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Copy the remaining tail in progressively narrower chunks. */
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1870 cur_reg = alloc_preg (cfg);
1871 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *   Emit IR storing sreg1 into the TLS slot identified by tls_key.  Under
 *   AOT the slot offset is loaded via a TLS-offset constant patch; the JIT
 *   resolves it immediately through mini_get_tls_offset ().
 */
1880 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1884 if (cfg->compile_aot) {
1885 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1886 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1888 ins->sreg2 = c->dreg;
1889 MONO_ADD_INS (cfg->cbb, ins);
1891 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1893 ins->inst_offset = mini_get_tls_offset (tls_key);
1894 MONO_ADD_INS (cfg->cbb, ins);
1901 * Emit IR to push the current LMF onto the LMF stack.
1904 emit_push_lmf (MonoCompile *cfg)
1907 * Emit IR to push the LMF:
1908 * lmf_addr = <lmf_addr from tls>
1909 * lmf->lmf_addr = lmf_addr
1910 * lmf->prev_lmf = *lmf_addr
1913 int lmf_reg, prev_lmf_reg;
1914 MonoInst *ins, *lmf_ins;
/* Fast path: the current MonoLMF lives directly in a TLS slot. */
1919 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1920 /* Load current lmf */
1921 lmf_ins = mono_get_lmf_intrinsic (cfg);
1923 MONO_ADD_INS (cfg->cbb, lmf_ins);
1924 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1925 lmf_reg = ins->dreg;
1926 /* Save previous_lmf */
1927 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make our lmf the head of the LMF list. */
1929 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1932 * Store lmf_addr in a variable, so it can be allocated to a global register.
1934 if (!cfg->lmf_addr_var)
1935 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain the LMF address: via jit_tls intrinsic, lmf_addr intrinsic, or icall. */
1938 ins = mono_get_jit_tls_intrinsic (cfg);
1940 int jit_tls_dreg = ins->dreg;
1942 MONO_ADD_INS (cfg->cbb, ins);
1943 lmf_reg = alloc_preg (cfg);
1944 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
1946 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1949 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1951 MONO_ADD_INS (cfg->cbb, lmf_ins);
1953 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1955 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1957 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1958 lmf_reg = ins->dreg;
1960 prev_lmf_reg = alloc_preg (cfg);
1961 /* Save previous_lmf */
1962 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1963 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf: *lmf_addr = &lmf */
1965 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1972 * Emit IR to pop the current LMF from the LMF stack.
1975 emit_pop_lmf (MonoCompile *cfg)
1977 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1983 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1984 lmf_reg = ins->dreg;
/* Fast path: restore previous_lmf directly into the TLS LMF slot. */
1986 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1987 /* Load previous_lmf */
1988 prev_lmf_reg = alloc_preg (cfg);
1989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1991 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
1994 * Emit IR to pop the LMF:
1995 * *(lmf->lmf_addr) = lmf->prev_lmf
1997 /* This could be called before emit_push_lmf () */
1998 if (!cfg->lmf_addr_var)
1999 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2000 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2002 prev_lmf_reg = alloc_preg (cfg);
2003 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
2004 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *   Pick the IR call opcode matching the call's return TYPE; CALLI selects
 *   the indirect (_REG) variant and VIRT the _MEMBASE variant.
 */
2009 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2012 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
/* Resolve shared generic parameters before dispatching on the type code. */
2015 type = mini_get_basic_type_from_generic (gsctx, type);
2016 type = mini_replace_type (type);
2017 switch (type->type) {
2018 case MONO_TYPE_VOID:
2019 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2022 case MONO_TYPE_BOOLEAN:
2025 case MONO_TYPE_CHAR:
2028 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2032 case MONO_TYPE_FNPTR:
2033 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2034 case MONO_TYPE_CLASS:
2035 case MONO_TYPE_STRING:
2036 case MONO_TYPE_OBJECT:
2037 case MONO_TYPE_SZARRAY:
2038 case MONO_TYPE_ARRAY:
2039 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2042 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2045 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2046 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
2047 if (type->data.klass->enumtype) {
2048 type = mono_class_enum_basetype (type->data.klass);
2051 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2052 case MONO_TYPE_TYPEDBYREF:
2053 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2054 case MONO_TYPE_GENERICINST:
2055 type = &type->data.generic_class->container_class->byval_arg;
2058 case MONO_TYPE_MVAR:
2060 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2062 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2068 * target_type_is_incompatible:
2069 * @cfg: MonoCompile context
2071 * Check that the item @arg on the evaluation stack can be stored
2072 * in the target type (can be a local, or field, etc).
2073 * The cfg arg can be used to check if we need verification or just
2076 * Returns: non-0 value if arg can't be stored on a target.
2079 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2081 MonoType *simple_type;
2084 target = mini_replace_type (target);
2085 if (target->byref) {
2086 /* FIXME: check that the pointed to types match */
2087 if (arg->type == STACK_MP)
2088 return arg->klass != mono_class_from_mono_type (target);
2089 if (arg->type == STACK_PTR)
2094 simple_type = mono_type_get_underlying_type (target);
2095 switch (simple_type->type) {
2096 case MONO_TYPE_VOID:
2100 case MONO_TYPE_BOOLEAN:
2103 case MONO_TYPE_CHAR:
2106 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2110 /* STACK_MP is needed when setting pinned locals */
2111 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2116 case MONO_TYPE_FNPTR:
2118 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2119 * in native int. (#688008).
2121 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2124 case MONO_TYPE_CLASS:
2125 case MONO_TYPE_STRING:
2126 case MONO_TYPE_OBJECT:
2127 case MONO_TYPE_SZARRAY:
2128 case MONO_TYPE_ARRAY:
2129 if (arg->type != STACK_OBJ)
2131 /* FIXME: check type compatibility */
2135 if (arg->type != STACK_I8)
2140 if (arg->type != STACK_R8)
/* Value types additionally require an exact class match. */
2143 case MONO_TYPE_VALUETYPE:
2144 if (arg->type != STACK_VTYPE)
2146 klass = mono_class_from_mono_type (simple_type);
2147 if (klass != arg->klass)
2150 case MONO_TYPE_TYPEDBYREF:
2151 if (arg->type != STACK_VTYPE)
2153 klass = mono_class_from_mono_type (simple_type);
2154 if (klass != arg->klass)
2157 case MONO_TYPE_GENERICINST:
2158 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2159 if (arg->type != STACK_VTYPE)
2161 klass = mono_class_from_mono_type (simple_type);
2162 if (klass != arg->klass)
2166 if (arg->type != STACK_OBJ)
2168 /* FIXME: check type compatibility */
/* Type variables: under gsharedvt these may be value types. */
2172 case MONO_TYPE_MVAR:
2173 g_assert (cfg->generic_sharing_context);
2174 if (mini_type_var_is_vt (cfg, simple_type)) {
2175 if (arg->type != STACK_VTYPE)
2178 if (arg->type != STACK_OBJ)
2183 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2189 * Prepare arguments for passing to a function call.
2190 * Return a non-zero value if the arguments can't be passed to the given
2192 * The type checks are not yet complete and some conversions may need
2193 * casts on 32 or 64 bit architectures.
2195 * FIXME: implement this using target_type_is_incompatible ()
2198 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2200 MonoType *simple_type;
/* The implicit 'this' argument must be an object or managed/unmanaged pointer. */
2204 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check each declared parameter against the stack type of its argument. */
2208 for (i = 0; i < sig->param_count; ++i) {
2209 if (sig->params [i]->byref) {
2210 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2214 simple_type = sig->params [i];
2215 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2217 switch (simple_type->type) {
2218 case MONO_TYPE_VOID:
2223 case MONO_TYPE_BOOLEAN:
2226 case MONO_TYPE_CHAR:
2229 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2235 case MONO_TYPE_FNPTR:
2236 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2239 case MONO_TYPE_CLASS:
2240 case MONO_TYPE_STRING:
2241 case MONO_TYPE_OBJECT:
2242 case MONO_TYPE_SZARRAY:
2243 case MONO_TYPE_ARRAY:
2244 if (args [i]->type != STACK_OBJ)
2249 if (args [i]->type != STACK_I8)
2254 if (args [i]->type != STACK_R8)
2257 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
2258 if (simple_type->data.klass->enumtype) {
2259 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2262 if (args [i]->type != STACK_VTYPE)
2265 case MONO_TYPE_TYPEDBYREF:
2266 if (args [i]->type != STACK_VTYPE)
2269 case MONO_TYPE_GENERICINST:
2270 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2273 case MONO_TYPE_MVAR:
2275 if (args [i]->type != STACK_VTYPE)
2279 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map an OP_*CALL_MEMBASE opcode to the corresponding direct-call
 *   opcode; aborts on any other opcode.
 */
2287 callvirt_to_call (int opcode)
2290 case OP_CALL_MEMBASE:
2292 case OP_VOIDCALL_MEMBASE:
2294 case OP_FCALL_MEMBASE:
2296 case OP_VCALL_MEMBASE:
2298 case OP_LCALL_MEMBASE:
2301 g_assert_not_reached ();
2307 #ifdef MONO_ARCH_HAVE_IMT
2308 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *   Emit IR materializing the IMT argument (either the given imt_arg value
 *   or a constant for METHOD) and attach it to CALL as an out-argument.
 */
2310 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* LLVM backend: pass the IMT value through call->imt_arg_reg. */
2314 if (COMPILE_LLVM (cfg)) {
2315 method_reg = alloc_preg (cfg);
2318 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2319 } else if (cfg->compile_aot) {
2320 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2323 MONO_INST_NEW (cfg, ins, OP_PCONST);
2324 ins->inst_p0 = method;
2325 ins->dreg = method_reg;
2326 MONO_ADD_INS (cfg->cbb, ins);
2330 call->imt_arg_reg = method_reg;
2332 #ifdef MONO_ARCH_IMT_REG
2333 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2335 /* Need this to keep the IMT arg alive */
2336 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM: pass via the arch IMT register, or the arch-specific hook. */
2341 #ifdef MONO_ARCH_IMT_REG
2342 method_reg = alloc_preg (cfg);
2345 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2346 } else if (cfg->compile_aot) {
2347 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2350 MONO_INST_NEW (cfg, ins, OP_PCONST);
2351 ins->inst_p0 = method;
2352 ins->dreg = method_reg;
2353 MONO_ADD_INS (cfg->cbb, ins);
2356 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2358 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate from MP a MonoJumpInfo describing a patch of TYPE at IP targeting TARGET. */
2363 static MonoJumpInfo *
2364 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2366 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2370 ji->data.target = target;
/* Return the generic-context usage of KLASS, or 0 when not compiling shared code. */
2376 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2378 if (cfg->generic_sharing_context)
2379 return mono_class_check_context_used (klass);
/* Return the generic-context usage of METHOD, or 0 when not compiling shared code. */
2385 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2387 if (cfg->generic_sharing_context)
2388 return mono_method_check_context_used (method);
2394 * check_method_sharing:
2396 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2399 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2401 gboolean pass_vtable = FALSE;
2402 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable. */
2404 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2405 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2406 gboolean sharable = FALSE;
2408 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2411 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2412 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2413 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2415 sharable = sharing_enabled && context_sharable;
2419 * Pass vtable iff target method might
2420 * be shared, which means that sharing
2421 * is enabled for its class and its
2422 * context is sharable (and it's not a
2425 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method_inst) need an mrgctx instead of a vtable. */
2429 if (mini_method_get_context (cmethod) &&
2430 mini_method_get_context (cmethod)->method_inst) {
2431 g_assert (!pass_vtable);
2433 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2436 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2437 MonoGenericContext *context = mini_method_get_context (cmethod);
2438 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2440 if (sharing_enabled && context_sharable)
2442 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2447 if (out_pass_vtable)
2448 *out_pass_vtable = pass_vtable;
2449 if (out_pass_mrgctx)
2450 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for SIG/ARGS, selecting the call opcode from the
 *   return type (or OP_TAILCALL), wiring up valuetype-return handling,
 *   converting float args under soft-float, and handing the call to the
 *   backend (LLVM or mono_arch_emit_call) for out-arg lowering.
 */
2453 inline static MonoCallInst *
2454 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2455 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2459 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2464 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2466 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2469 call->signature = sig;
2470 call->rgctx_reg = rgctx;
2471 sig_ret = mini_replace_type (sig->ret);
2473 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr or allocate a temp and
 * reference its address via OP_OUTARG_VTRETADDR. */
2476 if (mini_type_is_vtype (cfg, sig_ret)) {
2477 call->vret_var = cfg->vret_addr;
2478 //g_assert_not_reached ();
2480 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2481 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2484 temp->backend.is_pinvoke = sig->pinvoke;
2487 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2488 * address of return value to increase optimization opportunities.
2489 * Before vtype decomposition, the dreg of the call ins itself represents the
2490 * fact the call modifies the return value. After decomposition, the call will
2491 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2492 * will be transformed into an LDADDR.
2494 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2495 loada->dreg = alloc_preg (cfg);
2496 loada->inst_p0 = temp;
2497 /* We reference the call too since call->dreg could change during optimization */
2498 loada->inst_p1 = call;
2499 MONO_ADD_INS (cfg->cbb, loada);
2501 call->inst.dreg = temp->dreg;
2503 call->vret_var = loada;
2504 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2505 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2507 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2508 if (COMPILE_SOFT_FLOAT (cfg)) {
2510 * If the call has a float argument, we would need to do an r8->r4 conversion using
2511 * an icall, but that cannot be done during the call sequence since it would clobber
2512 * the call registers + the stack. So we do it before emitting the call.
2514 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2516 MonoInst *in = call->args [i];
2518 if (i >= sig->hasthis)
2519 t = sig->params [i - sig->hasthis];
2521 t = &mono_defaults.int_class->byval_arg;
2522 t = mono_type_get_underlying_type (t);
2524 if (!t->byref && t->type == MONO_TYPE_R4) {
2525 MonoInst *iargs [1];
2529 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2531 /* The result will be in an int vreg */
2532 call->args [i] = conv;
2538 call->need_unbox_trampoline = unbox_trampoline;
/* Let the backend lower the out-arguments. */
2541 if (COMPILE_LLVM (cfg))
2542 mono_llvm_emit_call (cfg, call);
2544 mono_arch_emit_call (cfg, call);
2546 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-parameter area and that this method makes calls. */
2549 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2550 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument in RGCTX_REG to CALL.  When the architecture
 * has a dedicated rgctx register, route the value to MONO_ARCH_RGCTX_REG;
 * otherwise just record the vreg in call->rgctx_arg_reg.
 */
2556 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2558 #ifdef MONO_ARCH_RGCTX_REG
2559 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2560 cfg->uses_rgctx_reg = TRUE;
2561 call->rgctx_reg = TRUE;
2563 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * IMT_ARG and RGCTX_ARG are optional extra hidden arguments.  For pinvoke
 * wrappers with cfg->check_pinvoke_callconv set, the stack pointer is saved
 * before the call and compared afterwards, throwing ExecutionEngineException
 * on a calling-convention mismatch.
 */
2570 inline static MonoInst*
2571 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2576 gboolean check_sp = FALSE;
/* Only verify the SP for actual pinvoke wrappers (not other managed-to-native wrappers). */
2578 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2579 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2581 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value to a fresh vreg so it survives until the call is lowered. */
2586 rgctx_reg = mono_alloc_preg (cfg);
2587 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2591 if (!cfg->stack_inbalance_var)
2592 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call for the post-call imbalance check. */
2594 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2595 ins->dreg = cfg->stack_inbalance_var->dreg;
2596 MONO_ADD_INS (cfg->cbb, ins);
2599 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2601 call->inst.sreg1 = addr->dreg;
2604 emit_imt_argument (cfg, call, NULL, imt_arg);
2606 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Read SP again after the call and compare against the saved value. */
2611 sp_reg = mono_alloc_preg (cfg);
2613 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2615 MONO_ADD_INS (cfg->cbb, ins);
2617 /* Restore the stack so we don't crash when throwing the exception */
2618 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2619 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2620 MONO_ADD_INS (cfg->cbb, ins);
2622 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2623 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2627 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2629 return (MonoInst*)call;
2633 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2636 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2638 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  THIS, when
 * non-NULL, makes the call virtual.  Handles remoting proxies, delegate
 * Invoke fast paths, devirtualization of final/non-virtual methods, and
 * vtable/IMT based dispatch.  Returns the call instruction.
 */
2641 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2642 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2644 #ifndef DISABLE_REMOTING
2645 gboolean might_be_remote = FALSE;
2647 gboolean virtual = this != NULL;
2648 gboolean enable_for_aot = TRUE;
2652 gboolean need_unbox_trampoline;
2655 sig = mono_method_signature (method);
/* Keep the rgctx value alive in its own vreg until set_rgctx_arg () below. */
2658 rgctx_reg = mono_alloc_preg (cfg);
2659 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2662 if (method->string_ctor) {
2663 /* Create the real signature */
2664 /* FIXME: Cache these */
/* String ctors actually return the string, so patch the duplicated signature. */
2665 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2666 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2671 context_used = mini_method_check_context_used (cfg, method);
2673 #ifndef DISABLE_REMOTING
/* A call through 'this' on a MarshalByRef class (or object) might hit a
 * transparent proxy, unless we can prove 'this' is a plain object. */
2674 might_be_remote = this && sig->hasthis &&
2675 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2676 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2678 if (might_be_remote && context_used) {
2681 g_assert (cfg->generic_sharing_context);
/* Under generic sharing, fetch the remoting-check invoke address from the rgctx
 * and do an indirect call instead of wrapping the method directly. */
2683 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2685 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Calls dispatched through object/interface slots may need the unbox trampoline
 * when the receiver turns out to be a boxed valuetype. */
2689 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2691 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2693 #ifndef DISABLE_REMOTING
2694 if (might_be_remote)
2695 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2698 call->method = method;
2699 call->inst.flags |= MONO_INST_HAS_METHOD;
2700 call->inst.inst_left = this;
2701 call->tail_call = tail;
2704 int vtable_reg, slot_reg, this_reg;
2707 this_reg = this->dreg;
/* Fast path: multicast delegate Invoke goes straight through delegate->invoke_impl. */
2709 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2710 MonoInst *dummy_use;
2712 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2714 /* Make a call to delegate->invoke_impl */
2715 call->inst.inst_basereg = this_reg;
2716 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2717 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2719 /* We must emit a dummy use here because the delegate trampoline will
2720 replace the 'this' argument with the delegate target making this activation
2721 no longer a root for the delegate.
2722 This is an issue for delegates that target collectible code such as dynamic
2723 methods of GC'able assemblies.
2725 For a test case look into #667921.
2727 FIXME: a dummy use is not the best way to do it as the local register allocator
2728 will put it on a caller save register and spil it around the call.
2729 Ideally, we would either put it on a callee save register or only do the store part.
2731 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2733 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, and final methods (modulo remoting/AOT
 * restrictions), only need a null check before a direct call. */
2736 if ((!cfg->compile_aot || enable_for_aot) &&
2737 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2738 (MONO_METHOD_IS_FINAL (method) &&
2739 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2740 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2742 * the method is not virtual, we just need to ensure this is not null
2743 * and then we can call the method directly.
2745 #ifndef DISABLE_REMOTING
2746 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2748 * The check above ensures method is not gshared, this is needed since
2749 * gshared methods can't have wrappers.
2751 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2755 if (!method->string_ctor)
2756 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2758 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2759 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2761 * the method is virtual, but we can statically dispatch since either
2762 * it's class or the method itself are sealed.
2763 * But first we need to ensure it's not a null reference.
2765 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2767 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (null-checking 'this' via the fault load)
 * and compute the slot to call through — IMT slot for interfaces, vtable slot otherwise. */
2769 vtable_reg = alloc_preg (cfg);
2770 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2771 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2773 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets from the vtable pointer. */
2775 guint32 imt_slot = mono_method_get_imt_slot (method);
2776 emit_imt_argument (cfg, call, call->method, imt_arg);
2777 slot_reg = vtable_reg;
2778 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2781 if (slot_reg == -1) {
/* Non-IMT interface dispatch: load the per-interface slot table from the vtable. */
2782 slot_reg = alloc_preg (cfg);
2783 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2784 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2787 slot_reg = vtable_reg;
2788 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2789 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2790 #ifdef MONO_ARCH_HAVE_IMT
2792 g_assert (mono_method_signature (method)->generic_param_count);
2793 emit_imt_argument (cfg, call, call->method, imt_arg);
2798 call->inst.sreg1 = slot_reg;
2799 call->inst.inst_offset = offset;
2800 call->virtual = TRUE;
2804 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2807 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2809 return (MonoInst*)call;
/* Convenience wrapper: non-tail call to METHOD with its own signature and no imt/rgctx args. */
2813 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2815 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * The call is neither calli, virtual, tail, rgctx, nor unbox-trampoline.
 */
2819 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2826 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2829 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2831 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered for the address FUNC, going
 * through the icall's wrapper.
 */
2835 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2837 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2841 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2845 * mono_emit_abs_call:
2847 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2849 inline static MonoInst*
2850 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2851 MonoMethodSignature *sig, MonoInst **args)
/* Create a patch-info entry describing the runtime function to call. */
2853 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2857 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in cfg->abs_patches so the resolver can map the fake address back
 * to the patch info (lazily created hash table keyed by the ji pointer itself). */
2860 if (cfg->abs_patches == NULL)
2861 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2862 g_hash_table_insert (cfg->abs_patches, ji, ji);
2863 ins = mono_emit_native_call (cfg, ji, sig, args);
2864 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG when the callee might
 * return a small integer without the upper bits initialized (pinvoke, or the
 * LLVM backend).  Returns the (possibly new) result instruction.
 */
2869 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2871 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2872 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2876 * Native code might return non register sized integers
2877 * without initializing the upper bits.
/* Map the small-int return type to the matching sign/zero-extension opcode. */
2879 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2880 case OP_LOADI1_MEMBASE:
2881 widen_op = OP_ICONV_TO_I1;
2883 case OP_LOADU1_MEMBASE:
2884 widen_op = OP_ICONV_TO_U1;
2886 case OP_LOADI2_MEMBASE:
2887 widen_op = OP_ICONV_TO_I2;
2889 case OP_LOADU2_MEMBASE:
2890 widen_op = OP_ICONV_TO_U2;
2896 if (widen_op != -1) {
2897 int dreg = alloc_preg (cfg);
2900 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Keep the stack type of the original result on the widened value. */
2901 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(dest, src, size) helper, cached in a
 * static.  Aborts if corlib does not provide it (old corlib).
 */
2911 get_memcpy_method (void)
2913 static MonoMethod *memcpy_method = NULL;
2914 if (!memcpy_method) {
2915 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2917 g_error ("Old corlib found. Install a new one");
2919 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Fill WB_BITMAP with one bit per pointer-sized slot of KLASS (starting at
 * OFFSET) that holds a GC reference, recursing into embedded valuetypes with
 * references.  Static fields are skipped.
 */
2923 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2925 MonoClassField *field;
2926 gpointer iter = NULL;
2928 while ((field = mono_class_get_fields (klass, &iter))) {
2931 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field->offset includes the (absent) object header; strip it. */
2933 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2934 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the bitmap encoding to work. */
2935 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2936 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2938 MonoClass *field_class = mono_class_from_mono_type (field->type);
2939 if (field_class->has_references)
2940 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR.  Prefers, in
 * order: the architecture's inline card-table barrier opcode, inline
 * card-table marking code, and finally a call to the managed write barrier.
 * No-op when write barriers are disabled for this compile.
 */
2946 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2948 int card_table_shift_bits;
2949 gpointer card_table_mask;
2951 MonoInst *dummy_use;
2952 int nursery_shift_bits;
2953 size_t nursery_size;
2954 gboolean has_card_table_wb = FALSE;
2956 if (!cfg->gen_write_barriers)
2959 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2961 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2963 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2964 has_card_table_wb = TRUE;
/* Fast path: single arch-specific opcode (not available under AOT or LLVM). */
2967 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2970 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2971 wbarrier->sreg1 = ptr->dreg;
2972 wbarrier->sreg2 = value->dreg;
2973 MONO_ADD_INS (cfg->cbb, wbarrier);
2974 } else if (card_table) {
/* Inline card marking: card = card_table + ((ptr >> shift) & mask); *card = 1. */
2975 int offset_reg = alloc_preg (cfg);
2976 int card_reg = alloc_preg (cfg);
2979 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2980 if (card_table_mask)
2981 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2983 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2984 * IMM's larger than 32bits.
2986 if (cfg->compile_aot) {
2987 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2989 MONO_INST_NEW (cfg, ins, OP_PCONST);
2990 ins->inst_p0 = card_table;
2991 ins->dreg = card_reg;
2992 MONO_ADD_INS (cfg->cbb, ins);
2995 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2996 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided managed write barrier. */
2998 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2999 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
3002 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an unrolled copy of SIZE bytes of KLASS from iargs[1] to iargs[0],
 * issuing write barriers for the pointer slots recorded by
 * create_write_barrier_bitmap ().  Falls back to the
 * mono_gc_wbarrier_value_copy_bitmap icall for large copies, and bails out
 * (presumably returning FALSE) when alignment is insufficient or the bitmap
 * cannot represent the type.
 */
3006 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3008 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3009 unsigned need_wb = 0;
3014 /*types with references can't have alignment smaller than sizeof(void*) */
3015 if (align < SIZEOF_VOID_P)
3018 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3019 if (size > 32 * SIZEOF_VOID_P)
3022 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3024 /* We don't unroll more than 5 stores to avoid code bloat. */
3025 if (size > 5 * SIZEOF_VOID_P) {
3026 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a multiple of the pointer size before calling the icall. */
3027 size += (SIZEOF_VOID_P - 1);
3028 size &= ~(SIZEOF_VOID_P - 1);
3030 EMIT_NEW_ICONST (cfg, iargs [2], size);
3031 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3032 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3036 destreg = iargs [0]->dreg;
3037 srcreg = iargs [1]->dreg;
3040 dest_ptr_reg = alloc_preg (cfg);
3041 tmp_reg = alloc_preg (cfg);
3044 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy pointer-sized words, emitting a write barrier where the bitmap says
 * the slot holds a reference. */
3046 while (size >= SIZEOF_VOID_P) {
3047 MonoInst *load_inst;
3048 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3049 load_inst->dreg = tmp_reg;
3050 load_inst->inst_basereg = srcreg;
3051 load_inst->inst_offset = offset;
3052 MONO_ADD_INS (cfg->cbb, load_inst);
3054 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3057 emit_write_barrier (cfg, iargs [0], load_inst);
3059 offset += SIZEOF_VOID_P;
3060 size -= SIZEOF_VOID_P;
3063 /*tmp += sizeof (void*)*/
3064 if (size >= SIZEOF_VOID_P) {
3065 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3066 MONO_ADD_INS (cfg->cbb, iargs [0]);
3070 /* Those cannot be references since size < sizeof (void*) */
/* Tail: copy the remaining 4-, 2- and 1-byte chunks without barriers. */
3072 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3073 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3079 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3080 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3086 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3087 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3096 * Emit code to copy a valuetype of type @klass whose address is stored in
3097 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *
 *   Emit code to copy a valuetype of type KLASS from *src->dreg to
 * *dest->dreg.  NATIVE selects the native (marshalled) layout.  Handles
 * gsharedvt types (size/memcpy fetched from the rgctx), write-barrier-aware
 * copies for types with references, inline memcpy for small types, and a
 * managed memcpy call otherwise.
 */
3100 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3102 MonoInst *iargs [4];
3103 int context_used, n;
3105 MonoMethod *memcpy_method;
3106 MonoInst *size_ins = NULL;
3107 MonoInst *memcpy_ins = NULL;
3111 * This check breaks with spilled vars... need to handle it during verification anyway.
3112 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: the concrete size/memcpy routine are only known at runtime. */
3115 if (mini_is_gsharedvt_klass (cfg, klass)) {
3117 context_used = mini_class_check_context_used (cfg, klass);
3118 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3119 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3123 n = mono_class_native_size (klass, &align);
3125 n = mono_class_value_size (klass, &align);
3127 /* if native is true there should be no references in the struct */
3128 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3129 /* Avoid barriers when storing to the stack */
3130 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3131 (dest->opcode == OP_LDADDR))) {
3137 context_used = mini_class_check_context_used (cfg, klass);
3139 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3140 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3142 } else if (context_used) {
3143 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3145 if (cfg->compile_aot) {
3146 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3148 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* The value-copy icall needs the class's GC descriptor computed up front. */
3149 mono_class_compute_gc_descriptor (klass);
3154 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3156 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or native layout): small fixed sizes are copied inline. */
3161 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3162 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3163 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3168 iargs [2] = size_ins;
3170 EMIT_NEW_ICONST (cfg, iargs [2], n);
3172 memcpy_method = get_memcpy_method ();
/* gsharedvt uses an indirect call through the rgctx-provided memcpy routine. */
3174 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3176 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(dest, val, size) helper, cached in a
 * static.  Aborts if corlib does not provide it (old corlib).
 */
3181 get_memset_method (void)
3183 static MonoMethod *memset_method = NULL;
3184 if (!memset_method) {
3185 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3187 g_error ("Old corlib found. Install a new one");
3189 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST.  gsharedvt types are cleared through a runtime-provided bzero
 * routine; small fixed-size types are cleared inline; the rest go through
 * the managed memset helper.
 */
3193 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3195 MonoInst *iargs [3];
3196 int n, context_used;
3198 MonoMethod *memset_method;
3199 MonoInst *size_ins = NULL;
3200 MonoInst *bzero_ins = NULL;
3201 static MonoMethod *bzero_method;
3203 /* FIXME: Optimize this for the case when dest is an LDADDR */
3205 mono_class_init (klass);
/* gsharedvt: size and the bzero routine come from the runtime generic context. */
3206 if (mini_is_gsharedvt_klass (cfg, klass)) {
3207 context_used = mini_class_check_context_used (cfg, klass);
3208 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3209 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3211 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3212 g_assert (bzero_method);
3214 iargs [1] = size_ins;
/* Indirect call: the concrete bzero implementation is only known at runtime. */
3215 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3219 n = mono_class_value_size (klass, &align);
/* Small types: clear inline instead of calling out. */
3221 if (n <= sizeof (gpointer) * 5) {
3222 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3225 memset_method = get_memset_method ();
3227 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3228 EMIT_NEW_ICONST (cfg, iargs [2], n);
3229 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD.  Depending on
 * CONTEXT_USED and the method's shape, the context is the method's MRGCTX
 * argument, its vtable argument, or the vtable loaded from 'this'.
 */
3234 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3236 MonoInst *this = NULL;
3238 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types without a method context can reach the
 * context through 'this'. */
3240 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3241 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3242 !method->klass->valuetype)
3243 EMIT_NEW_ARGLOAD (cfg, this, 0);
3245 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3246 MonoInst *mrgctx_loc, *mrgctx_var;
3249 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The MRGCTX is stashed in the vtable variable for such methods. */
3251 mrgctx_loc = mono_get_vtable_var (cfg);
3252 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3255 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3256 MonoInst *vtable_loc, *vtable_var;
3260 vtable_loc = mono_get_vtable_var (cfg);
3261 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3263 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3264 MonoInst *mrgctx_var = vtable_var;
/* The stored value is actually an MRGCTX; load its class_vtable field. */
3267 vtable_reg = alloc_preg (cfg);
3268 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3269 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable pointer straight out of 'this'. */
3277 vtable_reg = alloc_preg (cfg);
3278 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill an rgctx-entry patch descriptor: the fetch
 * is performed for METHOD, against its MRGCTX if IN_MRGCTX, and resolves
 * PATCH_DATA of PATCH_TYPE to an item of INFO_TYPE.
 */
3283 static MonoJumpInfoRgctxEntry *
3284 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3286 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3287 res->method = method;
3288 res->in_mrgctx = in_mrgctx;
3289 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3290 res->data->type = patch_type;
3291 res->data->data.target = patch_data;
3292 res->info_type = info_type;
/* Emit a call to the lazy rgctx fetch trampoline, resolving ENTRY against RGCTX. */
3297 static inline MonoInst*
3298 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3300 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR to fetch the RGCTX_TYPE item for KLASS from the runtime generic context. */
3304 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3305 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3307 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3308 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3310 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch the RGCTX_TYPE item for signature SIG from the runtime generic context. */
3314 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3315 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3317 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3318 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3320 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch the RGCTX_TYPE item describing a gsharedvt call to
 * CMETHOD with signature SIG from the runtime generic context.
 */
3324 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3325 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3327 MonoJumpInfoGSharedVtCall *call_info;
3328 MonoJumpInfoRgctxEntry *entry;
3331 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3332 call_info->sig = sig;
3333 call_info->method = cmethod;
3335 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3336 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3338 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch the gsharedvt info descriptor for CMETHOD from the runtime generic context. */
3343 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3344 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3346 MonoJumpInfoRgctxEntry *entry;
3349 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3350 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3352 return emit_rgctx_fetch (cfg, rgctx, entry);
3356 * emit_get_rgctx_method:
3358 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3359 * normal constants, else emit a load from the rgctx.
3362 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3363 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic context in play: the item can be emitted as a compile-time constant. */
3365 if (!context_used) {
3368 switch (rgctx_type) {
3369 case MONO_RGCTX_INFO_METHOD:
3370 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3372 case MONO_RGCTX_INFO_METHOD_RGCTX:
3373 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3376 g_assert_not_reached ();
/* Shared code: fetch the item from the runtime generic context. */
3379 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3380 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3382 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch the RGCTX_TYPE item for FIELD from the runtime generic context. */
3387 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3388 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3390 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3391 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3393 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the slot in cfg->gsharedvt_info's entry table that
 * holds (DATA, RGCTX_TYPE), reusing an existing matching slot when allowed
 * and growing the table (doubling, starting at 16) when full.
 */
3397 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3399 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3400 MonoRuntimeGenericContextInfoTemplate *template;
/* Reuse an existing slot with the same (type, data) pair — except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET entries, which always get their own slot. */
3405 for (i = 0; i < info->num_entries; ++i) {
3406 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3408 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry table when full (mempool allocation, so the old array is simply abandoned). */
3412 if (info->num_entries == info->count_entries) {
3413 MonoRuntimeGenericContextInfoTemplate *new_entries;
3414 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3416 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3418 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3419 info->entries = new_entries;
3420 info->count_entries = new_count_entries;
3423 idx = info->num_entries;
3424 template = &info->entries [idx];
3425 template->info_type = rgctx_type;
3426 template->data = data;
3428 info->num_entries ++;
3434 * emit_get_gsharedvt_info:
3436 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3439 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Find (or add) the slot for (DATA, RGCTX_TYPE), then load it from the
 * per-method gsharedvt info variable instead of going through an rgctx fetch. */
3444 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3445 /* Load info->entries [idx] */
3446 dreg = alloc_preg (cfg);
3447 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's by-value type. */
3453 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3455 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3459 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS.  The vtable
 * argument comes from the rgctx under generic sharing, otherwise it is a
 * constant.  On return the caller must check KLASS for load errors.
 */
3462 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3464 MonoInst *vtable_arg;
3468 context_used = mini_class_check_context_used (cfg, klass);
3471 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3472 klass, MONO_RGCTX_INFO_VTABLE);
3474 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3478 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a trampoline variant with a different (LLVM-compatible) signature. */
3481 if (COMPILE_LLVM (cfg))
3482 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3484 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3485 #ifdef MONO_ARCH_VTABLE_REG
/* The trampoline expects the vtable in a fixed architecture register. */
3486 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3487 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP, but only when sequence
 * points are enabled and METHOD is the method being compiled (not inlined).
 */
3494 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3498 if (cfg->gen_seq_points && cfg->method == method) {
3499 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3501 ins->flags |= MONO_INST_NONEMPTY_STACK;
3502 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit code that records the source and
 * target classes of a pending cast of the object in OBJ_REG into per-thread
 * JIT TLS data, so a failing cast can produce a detailed message.
 * NULL_CHECK presumably controls the is_null_bb bypass; OUT_BBLOCK, when
 * non-NULL, receives the current bblock on return.
 */
3507 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3509 if (mini_get_debug_options ()->better_cast_details) {
3510 int to_klass_reg = alloc_preg (cfg);
3511 int vtable_reg = alloc_preg (cfg);
3512 int klass_reg = alloc_preg (cfg);
3513 MonoBasicBlock *is_null_bb = NULL;
/* Skip the bookkeeping entirely for null objects. */
3517 NEW_BBLOCK (cfg, is_null_bb);
3519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3523 tls_get = mono_get_jit_tls_intrinsic (cfg);
3525 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3529 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the 'from' class of the cast. */
3530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3533 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3534 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3535 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3538 MONO_START_BB (cfg, is_null_bb);
3540 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the cast-details record made by save_cast_details () once the cast
 * has succeeded (only the 'from' field needs resetting).
 */
3546 reset_cast_details (MonoCompile *cfg)
3548 /* Reset the variables holding the cast details */
3549 if (mini_get_debug_options ()->better_cast_details) {
3550 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3552 MONO_ADD_INS (cfg->cbb, tls_get);
3553 /* It is enough to reset the from field */
3554 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3559 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise.  The comparison strategy depends on
 * the compilation mode (shared, generic-shared, AOT, or plain JIT).  On
 * return the caller must check ARRAY_CLASS for load errors.
 */
3562 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3564 int vtable_reg = alloc_preg (cfg);
3567 context_used = mini_class_check_context_used (cfg, array_class);
3569 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* The fault load also null-checks OBJ. */
3571 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* MONO_OPT_SHARED: vtables are per-domain, so compare the class instead. */
3573 if (cfg->opt & MONO_OPT_SHARED) {
3574 int class_reg = alloc_preg (cfg);
3575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3576 if (cfg->compile_aot) {
3577 int klass_reg = alloc_preg (cfg);
3578 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3579 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: the expected vtable comes from the rgctx. */
3583 } else if (context_used) {
3584 MonoInst *vtable_ins;
3586 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3587 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3589 if (cfg->compile_aot) {
3593 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3595 vt_reg = alloc_preg (cfg);
3596 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3600 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
/* JIT: the vtable pointer can be compared as an immediate. */
3602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3606 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3608 reset_cast_details (cfg);
/*
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 *
 *   Emits a call to the Nullable<T>.Unbox () helper method: an indirect
 * call through an RGCTX-fetched address in the shared-generic case, a
 * direct call (optionally passing the vtable as the rgctx argument)
 * otherwise.
 */
3616 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3618 	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3621 		MonoInst *rgctx, *addr;
3623 		/* FIXME: What if the class is shared? We might not
3624 		   have to get the address of the method from the
3626 		addr = emit_get_rgctx_method (cfg, context_used, method,
3627 									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3629 		rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3631 		return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3633 		gboolean pass_vtable, pass_mrgctx;
3634 		MonoInst *rgctx_arg = NULL;
3636 		check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
/* Unbox () is not expected to need an MRGCTX argument. */
3637 		g_assert (!pass_mrgctx);
3640 			MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3643 			EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3646 		return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (sp [0]) as KLASS.
 * Checks that the object is not an array (rank == 0) and that its
 * element class matches, throwing InvalidCastException otherwise, then
 * returns the address of the boxed payload (obj + sizeof (MonoObject)).
 */
3651 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3655 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3656 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3657 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3658 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
3660 	obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3661 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3662 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3664 	/* FIXME: generics */
3665 	g_assert (klass->rank == 0);
/* Arrays can never be unboxed: rank must be 0. */
3668 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3669 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3671 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3672 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3675 		MonoInst *element_class;
3677 		/* This assertion is from the unboxcast insn */
3678 		g_assert (klass->rank == 0);
/* Shared generic case: fetch the expected element class from the RGCTX. */
3680 		element_class = emit_get_rgctx_klass (cfg, context_used,
3681 				klass->element_class, MONO_RGCTX_INFO_KLASS);
3683 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3684 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3686 		save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3687 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3688 		reset_cast_details (cfg);
/* Result: pointer just past the MonoObject header, i.e. the value payload. */
3691 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3692 	MONO_ADD_INS (cfg->cbb, add);
3693 	add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ as the gsharedvt class KLASS.  Because the
 * concrete type is only known at runtime, the code branches on the
 * RGCTX-provided MONO_RGCTX_INFO_CLASS_BOX_TYPE value (compared against
 * 1 and 2 below) to handle the reference-type and Nullable cases
 * separately from the plain vtype case.  The result is a load through
 * addr_reg, which holds either the address of the unboxed vtype or the
 * address of a temporary holding the reference.  *OUT_CBB is set to the
 * basic block the caller should continue emitting into.
 */
3702 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3702 	MonoInst *addr, *klass_inst, *is_ref, *args[16];
3703 	MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3707 	klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3713 	args [1] = klass_inst;
/* Dynamic cast check + unbox through a JIT icall. */
3716 	obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3718 	NEW_BBLOCK (cfg, is_ref_bb);
3719 	NEW_BBLOCK (cfg, is_nullable_bb);
3720 	NEW_BBLOCK (cfg, end_bb);
3721 	is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3722 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3723 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3725 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3726 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3728 	/* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3729 	addr_reg = alloc_dreg (cfg, STACK_MP);
/* Plain vtype: the payload sits right after the MonoObject header. */
3733 	NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3734 	MONO_ADD_INS (cfg->cbb, addr);
3736 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3739 	MONO_START_BB (cfg, is_ref_bb);
3741 	/* Save the ref to a temporary */
3742 	dreg = alloc_ireg (cfg);
3743 	EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3744 	addr->dreg = addr_reg;
3745 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3746 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3749 	MONO_START_BB (cfg, is_nullable_bb);
/* Nullable case: call the Nullable unbox helper through a hand-built signature,
 * since the concrete Nullable<T>.Unbox method cannot be constructed at JIT time. */
3752 		MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3753 		MonoInst *unbox_call;
3754 		MonoMethodSignature *unbox_sig;
3757 		var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3759 		unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3760 		unbox_sig->ret = &klass->byval_arg;
3761 		unbox_sig->param_count = 1;
3762 		unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3763 		unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3765 		EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3766 		addr->dreg = addr_reg;
3769 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3772 	MONO_START_BB (cfg, end_bb);
3775 	EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3777 	*out_cbb = cfg->cbb;
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS (FOR_BOX distinguishes box
 * allocations for the managed allocator).  Picks an allocation strategy:
 * the managed GC allocator when available, mono_object_new () with an
 * explicit domain under MONO_OPT_SHARED, an RGCTX-based path when
 * context_used is set, a specialized mscorlib helper for out-of-line AOT
 * code, or mono_class_get_allocation_ftn () otherwise.
 * Returns NULL and set the cfg exception on error.
 */
3786 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3788 	MonoInst *iargs [2];
3794 		MonoInst *iargs [2];
3796 		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3798 		if (cfg->opt & MONO_OPT_SHARED)
3799 			rgctx_info = MONO_RGCTX_INFO_KLASS;
3801 			rgctx_info = MONO_RGCTX_INFO_VTABLE;
3802 		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3804 		if (cfg->opt & MONO_OPT_SHARED) {
3805 			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3807 			alloc_ftn = mono_object_new;
3810 			alloc_ftn = mono_object_new_specific;
3813 		if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3814 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3816 		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3819 	if (cfg->opt & MONO_OPT_SHARED) {
3820 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3821 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3823 		alloc_ftn = mono_object_new;
3824 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3825 		/* This happens often in argument checking code, eg. throw new FooException... */
3826 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3827 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3828 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3830 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3831 		MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a TypeLoadException via the cfg. */
3835 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3836 			cfg->exception_ptr = klass;
3840 #ifndef MONO_CROSS_COMPILE
3841 		managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3844 		if (managed_alloc) {
3845 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3846 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3848 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocation functions also take the instance size in pointer-sized words. */
3850 			guint32 lw = vtable->klass->instance_size;
3851 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3852 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
3853 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3856 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3860 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR to box VAL as KLASS and return the resulting object
 * reference.  Three paths: Nullable<T> goes through Nullable<T>.Box (),
 * gsharedvt classes branch at runtime on the RGCTX box-type value
 * (plain vtype / reference / nullable), and everything else is a plain
 * allocate-and-store.  *OUT_CBB is updated to the current basic block.
 * Returns NULL and set the cfg exception on error.
 */
3867 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3869 	MonoInst *alloc, *ins;
3871 	*out_cbb = cfg->cbb;
3873 	if (mono_class_is_nullable (klass)) {
3874 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3877 			/* FIXME: What if the class is shared? We might not
3878 			   have to get the method address from the RGCTX. */
3879 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3880 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3881 			MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3883 			return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3885 			gboolean pass_vtable, pass_mrgctx;
3886 			MonoInst *rgctx_arg = NULL;
3888 			check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3889 			g_assert (!pass_mrgctx);
3892 				MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3895 				EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3898 			return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3902 	if (mini_is_gsharedvt_klass (cfg, klass)) {
3903 		MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3904 		MonoInst *res, *is_ref, *src_var, *addr;
3907 		dreg = alloc_ireg (cfg);
3909 		NEW_BBLOCK (cfg, is_ref_bb);
3910 		NEW_BBLOCK (cfg, is_nullable_bb);
3911 		NEW_BBLOCK (cfg, end_bb);
/* Runtime dispatch on the box type stored in the gsharedvt info (1 == ref, 2 == nullable branch targets below). */
3912 		is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3913 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3914 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3916 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3917 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Plain vtype: allocate a box and copy the value in after the header. */
3920 		alloc = handle_alloc (cfg, klass, TRUE, context_used);
3923 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3924 		ins->opcode = OP_STOREV_MEMBASE;
3926 		EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3927 		res->type = STACK_OBJ;
3929 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3932 		MONO_START_BB (cfg, is_ref_bb);
3933 		addr_reg = alloc_ireg (cfg);
3935 		/* val is a vtype, so has to load the value manually */
3936 		src_var = get_vreg_to_inst (cfg, val->dreg);
3938 			src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3939 		EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3940 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3941 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3944 		MONO_START_BB (cfg, is_nullable_bb);
3947 			MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3948 															MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3950 			MonoMethodSignature *box_sig;
3953 			 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3954 			 * construct that method at JIT time, so have to do things by hand.
3956 			box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3957 			box_sig->ret = &mono_defaults.object_class->byval_arg;
3958 			box_sig->param_count = 1;
3959 			box_sig->params [0] = &klass->byval_arg;
3960 			box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3961 			EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3962 			res->type = STACK_OBJ;
3966 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3968 		MONO_START_BB (cfg, end_bb);
3970 		*out_cbb = cfg->cbb;
/* Non-gsharedvt default: allocate and store the value past the object header. */
3974 	alloc = handle_alloc (cfg, klass, TRUE, context_used);
3978 	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, with CONTEXT_USED,
 * an open generic container) that has at least one variant or covariant
 * type parameter instantiated with a reference type.  Such classes need
 * the slower, variance-aware cast paths.
 */
3985 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3988 	MonoGenericContainer *container;
3989 	MonoGenericInst *ginst;
3991 	if (klass->generic_class) {
3992 		container = klass->generic_class->container_class->generic_container;
3993 		ginst = klass->generic_class->context.class_inst;
3994 	} else if (klass->generic_container && context_used) {
3995 		container = klass->generic_container;
3996 		ginst = container->context.class_inst;
4001 	for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant type parameters; only (co)variant ones matter here. */
4003 		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4005 		type = ginst->type_argv [i];
4006 		if (mini_type_is_reference (cfg, type))
4012 // FIXME: This doesn't work yet (class libs tests fail?)
/* NOTE: the leading "TRUE ||" short-circuits the whole predicate, so every
 * class is currently treated as a 'complex' isinst/castclass target and is
 * routed through the cache-based helper path.  Remove the TRUE once the
 * class-library test failures mentioned above are resolved. */
4013 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a castclass implemented as a call to the cache-based marshal
 * wrapper (mono_marshal_get_castclass_with_cache ()), bracketed by
 * save/reset of the cast details for --debug=casts.  ARGS holds the
 * wrapper's arguments; OUT_BBLOCK receives the current basic block.
 */
4016 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4018 	MonoMethod *mono_castclass;
4021 	mono_castclass = mono_marshal_get_castclass_with_cache ();
4023 	save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4024 	res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4025 	reset_cast_details (cfg);
/*
 * handle_castclass:
 *
 *   Emit IR implementing the 'castclass' opcode: cast SRC to KLASS,
 * throwing InvalidCastException on failure and falling through for null
 * references.  Complex cases (interfaces, arrays, variance, etc. — and
 * currently all cases, see is_complex_isinst) are delegated to the
 * cache-based wrapper via emit_castclass_with_cache ().
 * Returns NULL and set the cfg exception on error.
 */
4034 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4036 	MonoBasicBlock *is_null_bb;
4037 	int obj_reg = src->dreg;
4038 	int vtable_reg = alloc_preg (cfg);
4039 	MonoInst *klass_inst = NULL;
4044 	if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4045 		MonoInst *cache_ins;
4047 		cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4052 		/* klass - it's the second element of the cache entry*/
4053 		EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4056 		args [2] = cache_ins;
4058 		return emit_castclass_with_cache (cfg, klass, args, NULL);
4061 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4064 	NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always passes castclass. */
4066 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4067 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4069 	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4071 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4072 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4073 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4075 		int klass_reg = alloc_preg (cfg);
4077 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes only need an equality check (fast path, JIT only). */
4079 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4080 			/* the remoting code is broken, access the class for now */
4081 			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4082 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4084 					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4085 					cfg->exception_ptr = klass;
4088 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4090 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4091 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4093 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4095 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4096 			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4100 	MONO_START_BB (cfg, is_null_bb);
4102 	reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR implementing the 'isinst' opcode: test whether SRC is an
 * instance of KLASS, producing either the object reference (success or
 * null input) or NULL (failure) in res_reg.  Complex cases (and
 * currently all cases, see is_complex_isinst) use the cache-based
 * wrapper; otherwise the code branches to false_bb on mismatch and to
 * is_null_bb (which leaves the input copied to the output) on match.
 * Returns NULL and set the cfg exception on error.
 */
4111 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4114 	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4115 	int obj_reg = src->dreg;
4116 	int vtable_reg = alloc_preg (cfg);
4117 	int res_reg = alloc_ireg_ref (cfg);
4118 	MonoInst *klass_inst = NULL;
4123 	if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4124 		MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4125 		MonoInst *cache_ins;
4127 		cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4132 		/* klass - it's the second element of the cache entry*/
4133 		EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4136 		args [2] = cache_ins;
4138 		return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4141 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4144 	NEW_BBLOCK (cfg, is_null_bb);
4145 	NEW_BBLOCK (cfg, false_bb);
4146 	NEW_BBLOCK (cfg, end_bb);
4148 	/* Do the assignment at the beginning, so the other assignment can be if converted */
4149 	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4150 	ins->type = STACK_OBJ;
/* isinst on a null reference yields the (null) input unchanged. */
4153 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4154 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4156 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4158 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4159 		g_assert (!context_used);
4160 		/* the is_null_bb target simply copies the input register to the output */
4161 		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4163 		int klass_reg = alloc_preg (cfg);
/* Array case: check rank first, then the element (cast) class. */
4166 			int rank_reg = alloc_preg (cfg);
4167 			int eclass_reg = alloc_preg (cfg);
4169 			g_assert (!context_used);
4170 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4171 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4172 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4173 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4174 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* The object[]/enum[]/Enum-related special cases mirror the runtime's array
 * covariance rules for primitive and enum element types. */
4175 			if (klass->cast_class == mono_defaults.object_class) {
4176 				int parent_reg = alloc_preg (cfg);
4177 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4178 				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4179 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4180 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4181 			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
4182 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4183 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4184 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4185 			} else if (klass->cast_class == mono_defaults.enum_class) {
4186 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4187 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4188 			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4189 				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4191 				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4192 					/* Check that the object is a vector too */
4193 					int bounds_reg = alloc_preg (cfg);
4194 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4195 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4196 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4199 				/* the is_null_bb target simply copies the input register to the output */
4200 				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4202 		} else if (mono_class_is_nullable (klass)) {
4203 			g_assert (!context_used);
4204 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4205 			/* the is_null_bb target simply copies the input register to the output */
4206 			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4208 			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4209 				g_assert (!context_used);
4210 				/* the remoting code is broken, access the class for now */
4211 				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4212 					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4214 						mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4215 						cfg->exception_ptr = klass;
4218 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4220 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4221 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4223 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4224 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4226 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4227 				/* the is_null_bb target simply copies the input register to the output */
4228 				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4233 	MONO_START_BB (cfg, false_bb);
/* Failure: isinst yields null. */
4235 	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4236 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4238 	MONO_START_BB (cfg, is_null_bb);
4240 	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the OP_CISINST opcode (isinst with remoting/transparent
 * proxy awareness).  With remoting enabled, transparent proxies whose
 * real type cannot be determined locally get the distinct result 2 so
 * the caller can fall back to a remote check; with DISABLE_REMOTING the
 * proxy paths are compiled out.
 */
4248 	/* This opcode takes as input an object reference and a class, and returns:
4249 	0) if the object is an instance of the class,
4250 	1) if the object is not instance of the class,
4251 	2) if the object is a proxy whose type cannot be determined */
4254 #ifndef DISABLE_REMOTING
4255 	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4257 	MonoBasicBlock *true_bb, *false_bb, *end_bb;
4259 	int obj_reg = src->dreg;
4260 	int dreg = alloc_ireg (cfg);
4262 #ifndef DISABLE_REMOTING
4263 	int klass_reg = alloc_preg (cfg);
4266 	NEW_BBLOCK (cfg, true_bb);
4267 	NEW_BBLOCK (cfg, false_bb);
4268 	NEW_BBLOCK (cfg, end_bb);
4269 #ifndef DISABLE_REMOTING
4270 	NEW_BBLOCK (cfg, false2_bb);
4271 	NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
4274 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4275 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4277 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4278 #ifndef DISABLE_REMOTING
4279 		NEW_BBLOCK (cfg, interface_fail_bb);
4282 		tmp_reg = alloc_preg (cfg);
4283 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4284 #ifndef DISABLE_REMOTING
/* Interface test failed: if the object is a transparent proxy with custom
 * type info, report "cannot determine" (2) instead of plain failure. */
4285 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4286 		MONO_START_BB (cfg, interface_fail_bb);
4287 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4289 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4291 		tmp_reg = alloc_preg (cfg);
4292 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4293 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4294 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4296 		mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4299 #ifndef DISABLE_REMOTING
4300 		tmp_reg = alloc_preg (cfg);
4301 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4302 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4304 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For proxies, test against the remote class's proxy_class instead. */
4305 		tmp_reg = alloc_preg (cfg);
4306 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4307 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4309 		tmp_reg = alloc_preg (cfg);
4310 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4311 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4312 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4314 		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4315 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4317 		MONO_START_BB (cfg, no_proxy_bb);
4319 		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4321 		g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4325 	MONO_START_BB (cfg, false_bb);
4327 	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4328 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4330 #ifndef DISABLE_REMOTING
4331 	MONO_START_BB (cfg, false2_bb);
4333 	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4334 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4337 	MONO_START_BB (cfg, true_bb);
4339 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4341 	MONO_START_BB (cfg, end_bb);
/* Materialize the integer result on the evaluation stack. */
4344 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4346 	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the OP_CCASTCLASS opcode (castclass with remoting /
 * transparent proxy awareness).  Like handle_cisinst, but failures throw
 * InvalidCastException instead of producing a failure code; proxies whose
 * type cannot be determined locally yield 1 so the caller can perform a
 * remote check.
 */
4354 	/* This opcode takes as input an object reference and a class, and returns:
4355 	0) if the object is an instance of the class,
4356 	1) if the object is a proxy whose type cannot be determined
4357 	an InvalidCastException exception is thrown otherwhise*/
4360 #ifndef DISABLE_REMOTING
4361 	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4363 	MonoBasicBlock *ok_result_bb;
4365 	int obj_reg = src->dreg;
4366 	int dreg = alloc_ireg (cfg);
4367 	int tmp_reg = alloc_preg (cfg);
4369 #ifndef DISABLE_REMOTING
4370 	int klass_reg = alloc_preg (cfg);
4371 	NEW_BBLOCK (cfg, end_bb);
4374 	NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always passes the cast (result 0). */
4376 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4377 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4379 	save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4381 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4382 #ifndef DISABLE_REMOTING
4383 		NEW_BBLOCK (cfg, interface_fail_bb);
4385 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4386 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4387 		MONO_START_BB (cfg, interface_fail_bb);
4388 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy interface failure throws; proxies without custom type info throw too. */
4390 		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4392 		tmp_reg = alloc_preg (cfg);
4393 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4394 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4395 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4397 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4398 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4400 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4401 		mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4402 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4405 #ifndef DISABLE_REMOTING
4406 		NEW_BBLOCK (cfg, no_proxy_bb);
4408 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4409 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4410 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: check against the remote class's proxy_class. */
4412 		tmp_reg = alloc_preg (cfg);
4413 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4414 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4416 		tmp_reg = alloc_preg (cfg);
4417 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4418 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4419 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4421 		NEW_BBLOCK (cfg, fail_1_bb);
4423 		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4425 		MONO_START_BB (cfg, fail_1_bb);
4427 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4428 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4430 		MONO_START_BB (cfg, no_proxy_bb);
4432 		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4434 		g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4438 	MONO_START_BB (cfg, ok_result_bb);
4440 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4442 #ifndef DISABLE_REMOTING
4443 	MONO_START_BB (cfg, end_bb);
/* Materialize the integer result on the evaluation stack. */
4447 	MONO_INST_NEW (cfg, ins, OP_ICONST);
4449 	ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS bound to METHOD with
 * the given TARGET, inlining the work of mono_delegate_ctor (): store the
 * target and method fields (with write barriers when enabled), set up the
 * per-domain code slot used to cache the compiled target, and install the
 * delegate invoke trampoline.
 * Returns NULL and set the cfg exception on error.
 */
4458 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4462 	gpointer *trampoline;
4463 	MonoInst *obj, *method_ins, *tramp_ins;
4467 	obj = handle_alloc (cfg, klass, FALSE, 0);
4471 	/* Inline the contents of mono_delegate_ctor */
4473 	/* Set target field */
4474 	/* Optimize away setting of NULL target */
4475 	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4476 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4477 		if (cfg->gen_write_barriers) {
4478 			dreg = alloc_preg (cfg);
4479 			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4480 			emit_write_barrier (cfg, ptr, target);
4484 	/* Set method field */
4485 	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4486 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4487 	if (cfg->gen_write_barriers) {
4488 		dreg = alloc_preg (cfg);
4489 		EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4490 		emit_write_barrier (cfg, ptr, method_ins);
4493 	 * To avoid looking up the compiled code belonging to the target method
4494 	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4495 	 * store it, and we fill it after the method has been compiled.
4497 	if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4498 		MonoInst *code_slot_ins;
4501 			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The method -> code-slot mapping is per-domain and shared between compilations. */
4503 			domain = mono_domain_get ();
4504 			mono_domain_lock (domain);
4505 			if (!domain_jit_info (domain)->method_code_hash)
4506 				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4507 			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4509 				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4510 				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4512 			mono_domain_unlock (domain);
4514 			if (cfg->compile_aot)
4515 				EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4517 				EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4519 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4522 	/* Set invoke_impl field */
4523 	if (cfg->compile_aot) {
4524 		MonoClassMethodPair *del_tramp;
4526 		del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
4527 		del_tramp->klass = klass;
4528 		del_tramp->method = context_used ? NULL : method;
4529 		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4531 		trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
4532 		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4534 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4536 	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call that allocates a RANK-dimensional array, with the dimension
 * arguments in SP. The mono_array_new_va () icall is registered so it gets an
 * icall wrapper, and the cfg is flagged as containing varargs since the icall
 * uses a vararg calling convention (which also forces LLVM off for the method).
 * NOTE(review): interior lines appear elided in this extract; comments cover
 * only the visible statements.
 */
4542 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4544 MonoJitICallInfo *info;
4546 /* Need to register the icall so it gets an icall wrapper */
4547 info = mono_get_array_new_va_icall (rank);
4549 cfg->flags |= MONO_CFG_HAS_VARARGS;
4551 /* mono_array_new_va () needs a vararg calling convention */
4552 cfg->disable_llvm = TRUE;
4554 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4555 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that materializes the GOT address into
 * cfg->got_var, inserting it at the very start of the entry basic block. A
 * dummy use is added to bb_exit so liveness analysis keeps got_var alive for
 * the whole method even when only the back end references it. No-op if there
 * is no got_var or it was already allocated.
 */
4559 mono_emit_load_got_addr (MonoCompile *cfg)
4561 MonoInst *getaddr, *dummy_use;
4563 if (!cfg->got_var || cfg->got_var_allocated)
4566 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4567 getaddr->cil_code = cfg->header->code;
4568 getaddr->dreg = cfg->got_var->dreg;
4570 /* Add it to the start of the first bblock */
4571 if (cfg->bb_entry->code) {
/* Prepend by hand: the load must execute before any other instruction. */
4572 getaddr->next = cfg->bb_entry->code;
4573 cfg->bb_entry->code = getaddr;
4576 MONO_ADD_INS (cfg->bb_entry, getaddr);
4578 cfg->got_var_allocated = TRUE;
4581 * Add a dummy use to keep the got_var alive, since real uses might
4582 * only be generated by the back ends.
4583 * Add it to end_bblock, so the variable's lifetime covers the whole
4585 * It would be better to make the usage of the got var explicit in all
4586 * cases when the backend needs it (i.e. calls, throw etc.), so this
4587 * wouldn't be needed.
4589 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4590 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * IL-size threshold for inlining, lazily initialized by
 * mono_method_check_inlining () from the MONO_INLINELIMIT environment
 * variable, falling back to INLINE_LENGTH_LIMIT.
 */
4593 static int inline_limit;
4594 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in CFG.
 * Visible rejection criteria: generic sharing in effect, inline depth > 10,
 * NOINLINING/SYNCHRONIZED flags, MarshalByRef classes, IL size over the
 * inline_limit (unless AGGRESSIVE_INLINING), classes whose cctor cannot be run
 * eagerly, declarative security, and (under soft-float) R4 parameters/returns.
 * NOTE(review): interior lines appear elided in this extract.
 */
4597 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4599 MonoMethodHeaderSummary header;
4601 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4602 MonoMethodSignature *sig = mono_method_signature (method);
4606 if (cfg->generic_sharing_context)
4609 if (cfg->inline_depth > 10)
4612 #ifdef MONO_ARCH_HAVE_LMF_OPS
4613 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4614 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
/* NOTE(review): 'signature->ret' — every other line here uses 'sig'; looks
 * like a stale identifier that only compiles where this #ifdef is off.
 * Confirm against an arch with MONO_ARCH_HAVE_LMF_OPS defined. */
4615 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4620 if (!mono_method_get_header_summary (method, &header))
4623 /*runtime, icall and pinvoke are checked by summary call*/
4624 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4625 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4626 (mono_class_is_marshalbyref (method->klass)) ||
4630 /* also consider num_locals? */
4631 /* Do the size check early to avoid creating vtables */
4632 if (!inline_limit_inited) {
4633 if (g_getenv ("MONO_INLINELIMIT"))
4634 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4636 inline_limit = INLINE_LENGTH_LIMIT;
4637 inline_limit_inited = TRUE;
4639 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4643 * if we can initialize the class of the method right away, we do,
4644 * otherwise we don't allow inlining if the class needs initialization,
4645 * since it would mean inserting a call to mono_runtime_class_init()
4646 * inside the inlined code
4648 if (!(cfg->opt & MONO_OPT_SHARED)) {
4649 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4650 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4651 vtable = mono_class_vtable (cfg->domain, method->klass);
4654 if (!cfg->compile_aot)
4655 mono_runtime_class_init (vtable);
4656 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4657 if (cfg->run_cctors && method->klass->has_cctor) {
4658 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4659 if (!method->klass->runtime_info)
4660 /* No vtable created yet */
4662 vtable = mono_class_vtable (cfg->domain, method->klass);
4665 /* This makes so that inline cannot trigger */
4666 /* .cctors: too many apps depend on them */
4667 /* running with a specific order... */
4668 if (! vtable->initialized)
4670 mono_runtime_class_init (vtable);
4672 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4673 if (!method->klass->runtime_info)
4674 /* No vtable created yet */
4676 vtable = mono_class_vtable (cfg->domain, method->klass);
4679 if (!vtable->initialized)
4684 * If we're compiling for shared code
4685 * the cctor will need to be run at aot method load time, for example,
4686 * or at the end of the compilation of the inlining method.
4688 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4693 * CAS - do not inline methods with declarative security
4694 * Note: this has to be before any possible return TRUE;
4696 if (mono_security_method_has_declsec (method))
4699 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4700 if (mono_arch_is_soft_float ()) {
/* Soft-float: R4 values need decomposition the inliner can't handle. */
4702 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4704 for (i = 0; i < sig->param_count; ++i)
4705 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access from METHOD to a field of KLASS
 * requires emitting a class-initialization call. Visible fast paths: when not
 * AOT-compiling and the vtable is already initialized; BeforeFieldInit classes
 * accessed from within cfg->method; classes that don't need a cctor; and
 * static methods touching their own class (init happens before the call).
 */
4714 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4716 if (!cfg->compile_aot) {
4718 if (vtable->initialized)
4722 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4723 if (cfg->method == method)
4727 if (!mono_class_needs_cctor_run (klass, method))
4730 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4731 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of one-dimensional array
 * ARR whose elements are of class KLASS; BCHECK controls emission of the
 * bounds check. On 64-bit the 32-bit index is sign-extended (LLVM handles
 * that itself); x86/amd64 use a single LEA when the element size is a power
 * of two; gsharedvt classes load the element size from the rgctx at runtime.
 * Returns the address instruction (type STACK_MP).
 */
4738 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4742 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4745 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4748 mono_class_init (klass);
4749 size = mono_class_array_element_size (klass);
4752 mult_reg = alloc_preg (cfg);
4753 array_reg = arr->dreg;
4754 index_reg = index->dreg;
4756 #if SIZEOF_REGISTER == 8
4757 /* The array reg is 64 bits but the index reg is only 32 */
4758 if (COMPILE_LLVM (cfg)) {
4760 index2_reg = index_reg;
4762 index2_reg = alloc_preg (cfg);
4763 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4766 if (index->type == STACK_I8) {
4767 index2_reg = alloc_preg (cfg);
4768 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4770 index2_reg = index_reg;
4775 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4777 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes: fold the scale into a single LEA. */
4778 if (size == 1 || size == 2 || size == 4 || size == 8) {
4779 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4781 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4782 ins->klass = mono_class_get_element_class (klass);
4783 ins->type = STACK_MP;
4789 add_reg = alloc_ireg_mp (cfg);
4792 MonoInst *rgctx_ins;
4795 g_assert (cfg->generic_sharing_context);
4796 context_used = mini_class_check_context_used (cfg, klass);
4797 g_assert (context_used);
/* gsharedvt: element size is only known at runtime, fetch it from the rgctx. */
4798 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4799 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4803 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4804 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4805 ins->klass = mono_class_get_element_class (klass);
4806 ins->type = STACK_MP;
4807 MONO_ADD_INS (cfg->cbb, ins);
4812 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two-dimensional array ARR with elements of class KLASS. Loads the bounds
 * descriptor, subtracts each dimension's lower bound, range-checks both
 * effective indexes against the dimension lengths (IndexOutOfRangeException
 * on failure), then computes offset = ((idx1 * len2) + idx2) * element_size.
 * Only compiled where the architecture has native multiply (see #ifndef).
 */
4814 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4816 int bounds_reg = alloc_preg (cfg);
4817 int add_reg = alloc_ireg_mp (cfg);
4818 int mult_reg = alloc_preg (cfg);
4819 int mult2_reg = alloc_preg (cfg);
4820 int low1_reg = alloc_preg (cfg);
4821 int low2_reg = alloc_preg (cfg);
4822 int high1_reg = alloc_preg (cfg);
4823 int high2_reg = alloc_preg (cfg);
4824 int realidx1_reg = alloc_preg (cfg);
4825 int realidx2_reg = alloc_preg (cfg);
4826 int sum_reg = alloc_preg (cfg);
4827 int index1, index2, tmpreg;
4831 mono_class_init (klass);
4832 size = mono_class_array_element_size (klass);
4834 index1 = index_ins1->dreg;
4835 index2 = index_ins2->dreg;
4837 #if SIZEOF_REGISTER == 8
4838 /* The array reg is 64 bits but the index reg is only 32 */
4839 if (COMPILE_LLVM (cfg)) {
4842 tmpreg = alloc_preg (cfg);
4843 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4845 tmpreg = alloc_preg (cfg);
4846 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4850 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4854 /* range checking */
4855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4856 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4858 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4859 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4860 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4861 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4862 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
/* Unsigned compare: a negative effective index wraps and also fails LE_UN. */
4863 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4864 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4866 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4867 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4868 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4869 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4870 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4871 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4872 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4874 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4875 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4877 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4878 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4880 ins->type = STACK_MP;
4882 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for the array Address/Get/Set
 * methods of CMETHOD. Rank is derived from the signature parameter count
 * (minus one for setters). Rank-1 and, when intrinsics are enabled and the
 * arch has native multiply, rank-2 arrays get inline address code; everything
 * else falls back to a call to the marshal-generated Address wrapper.
 */
4889 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4893 MonoMethod *addr_method;
4896 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4899 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4901 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4902 /* emit_ldelema_2 depends on OP_LMUL */
4903 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4904 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4908 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4909 addr_method = mono_marshal_get_array_address (rank, element_size);
4910 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint. */
4915 static MonoBreakPolicy
4916 always_insert_breakpoint (MonoMethod *method)
4918 return MONO_BREAK_POLICY_ALWAYS;
/* Current embedder-settable policy callback; see mono_set_break_policy (). */
4921 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4924 * mono_set_break_policy:
4925 * policy_callback: the new callback function
4927 * Allow embedders to decide whether to actually obey breakpoint instructions
4928 * (both break IL instructions and Debugger.Break () method calls), for example
4929 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4930 * untrusted or semi-trusted code.
4932 * @policy_callback will be called every time a break point instruction needs to
4933 * be inserted with the method argument being the method that calls Debugger.Break()
4934 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4935 * if it wants the breakpoint to not be effective in the given method.
4936 * #MONO_BREAK_POLICY_ALWAYS is the default.
4939 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4941 if (policy_callback)
4942 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy. */
4944 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the registered break policy for METHOD and return whether a
 * breakpoint should actually be emitted. MONO_BREAK_POLICY_ON_DBG (mdb) is no
 * longer supported and only warns. Unknown return values also warn.
 * NOTE(review): the name typo ("brekpoint") is long-standing; callers outside
 * this view use it, so it cannot be renamed here.
 */
4948 should_insert_brekpoint (MonoMethod *method) {
4949 switch (break_policy_func (method)) {
4950 case MONO_BREAK_POLICY_ALWAYS:
4952 case MONO_BREAK_POLICY_NEVER:
4954 case MONO_BREAK_POLICY_ON_DBG:
4955 g_warning ("mdb no longer supported");
4958 g_warning ("Incorrect value returned from break policy callback");
4963 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls: compute the element
 * address (bounds already checked by the managed callers), then copy the
 * value between args [2] and the element. For the set path a write barrier
 * is emitted when the element type is a reference type.
 */
4965 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4967 MonoInst *addr, *store, *load;
4968 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4970 /* the bounds check is already done by the callers */
4971 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4973 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4974 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4975 if (mini_type_is_reference (cfg, fsig->params [2]))
4976 emit_write_barrier (cfg, addr, load);
4978 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4979 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is treated as a reference type under the current
 * (possibly generic-shared) compilation context. */
4986 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4988 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into element SP [1] of array SP [0] whose
 * elements are of class KLASS. Reference-type stores with SAFETY_CHECKS (and
 * a non-NULL constant value) go through the virtual stelemref helper, which
 * performs the array covariance check. Otherwise: gsharedvt elements use
 * OP_STOREV_MEMBASE via a computed address, constant indexes get a direct
 * membase store after a bounds check, and the general case computes the
 * address and adds a write barrier for reference elements.
 */
4992 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4994 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4995 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4996 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4997 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4998 MonoInst *iargs [3];
5001 mono_class_setup_vtable (obj_array);
5002 g_assert (helper->slot);
5004 if (sp [0]->type != STACK_OBJ)
5006 if (sp [2]->type != STACK_OBJ)
5013 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5017 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5020 // FIXME-VT: OP_ICONST optimization
5021 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5022 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5023 ins->opcode = OP_STOREV_MEMBASE;
5024 } else if (sp [1]->opcode == OP_ICONST) {
5025 int array_reg = sp [0]->dreg;
5026 int index_reg = sp [1]->dreg;
/* Constant index: fold the element offset at compile time. */
5027 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
5030 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5031 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5033 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5034 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5035 if (generic_class_is_reference_type (cfg, klass))
5036 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore/UnsafeLoad: a store delegates to
 * emit_array_store () without safety checks; a load computes the element
 * address (no bounds check) and loads the value. The element class comes
 * from the third parameter type (store) or the return type (load).
 */
5043 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5048 eklass = mono_class_from_mono_type (fsig->params [2]);
5050 eklass = mono_class_from_mono_type (fsig->ret);
5053 return emit_array_store (cfg, eklass, args, FALSE);
5055 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5056 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Return whether values of PARAM_KLASS can be reinterpreted as RETURN_KLASS
 * by Array.UnsafeMov: both must be valuetypes without reference fields, not
 * mix struct with primitive/enum, contain no R4/R8 (which live in FP regs),
 * and have equal value sizes.
 * NOTE(review): '¶m_klass' below is an encoding artifact — '&param'
 * mis-decoded as the PILCROW entity. The intended token is '&param_klass';
 * restore it when editing with the full file available.
 */
5062 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5066 //Only allow for valuetypes
5067 if (!param_klass->valuetype || !return_klass->valuetype)
5071 if (param_klass->has_references || return_klass->has_references)
5074 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5075 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5076 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5079 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5080 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5083 //And have the same size
5084 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov when the parameter and return types (or, for
 * rank-1 arrays, their element classes) are bit-compatible valuetypes per
 * is_unsafe_mov_compatible (). Falls through (returning NULL, presumably —
 * tail elided in this extract) when the reinterpretation is not allowed.
 */
5090 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5092 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5093 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5095 //Valuetypes that are semantically equivalent
5096 if (is_unsafe_mov_compatible (param_klass, return_klass))
5099 //Arrays of valuetypes that are semantically equivalent
5100 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR: SIMD ctor
 * intrinsics when MONO_OPT_SIMD is enabled (and the arch supports them),
 * otherwise native-types intrinsics. Returns NULL-or-instruction per the
 * usual intrinsic convention.
 */
5107 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5109 #ifdef MONO_ARCH_SIMD_INTRINSICS
5110 MonoInst *ins = NULL;
5112 if (cfg->opt & MONO_OPT_SIMD) {
5113 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5119 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) into the
 * current basic block and return it. */
5123 emit_memory_barrier (MonoCompile *cfg, int kind)
5125 MonoInst *ins = NULL;
5126 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5127 MONO_ADD_INS (cfg->cbb, ins);
5128 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics recognized only when compiling with the LLVM back end:
 * Math.Sin/Cos/Sqrt/Abs(double) map to single FP instructions, and (with
 * MONO_OPT_CMOV) Math.Min/Max on I4/U4/I8/U8 map to min/max opcodes.
 * Returns the emitted instruction or NULL when nothing matched.
 */
5134 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5136 MonoInst *ins = NULL;
5139 /* The LLVM backend supports these intrinsics */
5140 if (cmethod->klass == mono_defaults.math_class) {
5141 if (strcmp (cmethod->name, "Sin") == 0) {
5143 } else if (strcmp (cmethod->name, "Cos") == 0) {
5145 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5147 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5152 MONO_INST_NEW (cfg, ins, opcode);
5153 ins->type = STACK_R8;
5154 ins->dreg = mono_alloc_freg (cfg);
5155 ins->sreg1 = args [0]->dreg;
5156 MONO_ADD_INS (cfg->cbb, ins);
5160 if (cfg->opt & MONO_OPT_CMOV) {
5161 if (strcmp (cmethod->name, "Min") == 0) {
5162 if (fsig->params [0]->type == MONO_TYPE_I4)
5164 if (fsig->params [0]->type == MONO_TYPE_U4)
5165 opcode = OP_IMIN_UN;
5166 else if (fsig->params [0]->type == MONO_TYPE_I8)
5168 else if (fsig->params [0]->type == MONO_TYPE_U8)
5169 opcode = OP_LMIN_UN;
5170 } else if (strcmp (cmethod->name, "Max") == 0) {
5171 if (fsig->params [0]->type == MONO_TYPE_I4)
5173 if (fsig->params [0]->type == MONO_TYPE_U4)
5174 opcode = OP_IMAX_UN;
5175 else if (fsig->params [0]->type == MONO_TYPE_I8)
5177 else if (fsig->params [0]->type == MONO_TYPE_U8)
5178 opcode = OP_LMAX_UN;
5183 MONO_INST_NEW (cfg, ins, opcode);
/* Min/Max overloads are either int or long; pick the stack type from param 0. */
5184 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5185 ins->dreg = mono_alloc_ireg (cfg);
5186 ins->sreg1 = args [0]->dreg;
5187 ins->sreg2 = args [1]->dreg;
5188 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic sharing: the Array.UnsafeStore /
 * UnsafeLoad / UnsafeMov internal helpers. Returns the emitted instruction
 * or falls through (NULL) when the method is not one of them.
 */
5196 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5198 if (cmethod->klass == mono_defaults.array_class) {
5199 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5200 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5201 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5202 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5203 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5204 return emit_array_unsafe_mov (cfg, fsig, args);
5211 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5213 MonoInst *ins = NULL;
5215 static MonoClass *runtime_helpers_class = NULL;
5216 if (! runtime_helpers_class)
5217 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5218 "System.Runtime.CompilerServices", "RuntimeHelpers");
5220 if (cmethod->klass == mono_defaults.string_class) {
5221 if (strcmp (cmethod->name, "get_Chars") == 0) {
5222 int dreg = alloc_ireg (cfg);
5223 int index_reg = alloc_preg (cfg);
5224 int mult_reg = alloc_preg (cfg);
5225 int add_reg = alloc_preg (cfg);
5227 #if SIZEOF_REGISTER == 8
5228 /* The array reg is 64 bits but the index reg is only 32 */
5229 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5231 index_reg = args [1]->dreg;
5233 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5235 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5236 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5237 add_reg = ins->dreg;
5238 /* Avoid a warning */
5240 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5243 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5244 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5245 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5246 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5248 type_from_op (ins, NULL, NULL);
5250 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5251 int dreg = alloc_ireg (cfg);
5252 /* Decompose later to allow more optimizations */
5253 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5254 ins->type = STACK_I4;
5255 ins->flags |= MONO_INST_FAULT;
5256 cfg->cbb->has_array_access = TRUE;
5257 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5260 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5261 int mult_reg = alloc_preg (cfg);
5262 int add_reg = alloc_preg (cfg);
5264 /* The corlib functions check for oob already. */
5265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5266 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5267 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5268 return cfg->cbb->last_ins;
5271 } else if (cmethod->klass == mono_defaults.object_class) {
5273 if (strcmp (cmethod->name, "GetType") == 0) {
5274 int dreg = alloc_ireg_ref (cfg);
5275 int vt_reg = alloc_preg (cfg);
5276 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5277 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5278 type_from_op (ins, NULL, NULL);
5281 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5282 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5283 int dreg = alloc_ireg (cfg);
5284 int t1 = alloc_ireg (cfg);
5286 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5287 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5288 ins->type = STACK_I4;
5292 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5293 MONO_INST_NEW (cfg, ins, OP_NOP);
5294 MONO_ADD_INS (cfg->cbb, ins);
5298 } else if (cmethod->klass == mono_defaults.array_class) {
5299 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5300 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5302 #ifndef MONO_BIG_ARRAYS
5304 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5307 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5308 int dreg = alloc_ireg (cfg);
5309 int bounds_reg = alloc_ireg_mp (cfg);
5310 MonoBasicBlock *end_bb, *szarray_bb;
5311 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5313 NEW_BBLOCK (cfg, end_bb);
5314 NEW_BBLOCK (cfg, szarray_bb);
5316 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5317 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5318 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5319 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5320 /* Non-szarray case */
5322 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5323 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5325 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5326 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5327 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5328 MONO_START_BB (cfg, szarray_bb);
5331 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5332 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5334 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5335 MONO_START_BB (cfg, end_bb);
5337 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5338 ins->type = STACK_I4;
5344 if (cmethod->name [0] != 'g')
5347 if (strcmp (cmethod->name, "get_Rank") == 0) {
5348 int dreg = alloc_ireg (cfg);
5349 int vtable_reg = alloc_preg (cfg);
5350 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5351 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5352 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5353 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5354 type_from_op (ins, NULL, NULL);
5357 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5358 int dreg = alloc_ireg (cfg);
5360 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5361 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5362 type_from_op (ins, NULL, NULL);
5367 } else if (cmethod->klass == runtime_helpers_class) {
5369 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5370 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
5374 } else if (cmethod->klass == mono_defaults.thread_class) {
5375 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5376 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5377 MONO_ADD_INS (cfg->cbb, ins);
5379 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5380 return emit_memory_barrier (cfg, FullBarrier);
5382 } else if (cmethod->klass == mono_defaults.monitor_class) {
5384 /* FIXME this should be integrated to the check below once we support the trampoline version */
5385 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5386 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5387 MonoMethod *fast_method = NULL;
5389 /* Avoid infinite recursion */
5390 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5393 fast_method = mono_monitor_get_fast_path (cmethod);
5397 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5401 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5402 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5405 if (COMPILE_LLVM (cfg)) {
5407 * Pass the argument normally, the LLVM backend will handle the
5408 * calling convention problems.
5410 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5412 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5413 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5414 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5415 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5418 return (MonoInst*)call;
5419 } else if (strcmp (cmethod->name, "Exit") == 0) {
5422 if (COMPILE_LLVM (cfg)) {
5423 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5425 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5426 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5427 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5428 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5431 return (MonoInst*)call;
5433 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5435 MonoMethod *fast_method = NULL;
5437 /* Avoid infinite recursion */
5438 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5439 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5440 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5443 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5444 strcmp (cmethod->name, "Exit") == 0)
5445 fast_method = mono_monitor_get_fast_path (cmethod);
5449 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5452 } else if (cmethod->klass->image == mono_defaults.corlib &&
5453 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5454 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5457 #if SIZEOF_REGISTER == 8
5458 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5459 /* 64 bit reads are already atomic */
5460 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5461 ins->dreg = mono_alloc_preg (cfg);
5462 ins->inst_basereg = args [0]->dreg;
5463 ins->inst_offset = 0;
5464 MONO_ADD_INS (cfg->cbb, ins);
5468 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
5469 if (strcmp (cmethod->name, "Increment") == 0) {
5470 MonoInst *ins_iconst;
5473 if (fsig->params [0]->type == MONO_TYPE_I4) {
5474 opcode = OP_ATOMIC_ADD_NEW_I4;
5475 cfg->has_atomic_add_new_i4 = TRUE;
5477 #if SIZEOF_REGISTER == 8
5478 else if (fsig->params [0]->type == MONO_TYPE_I8)
5479 opcode = OP_ATOMIC_ADD_NEW_I8;
5482 if (!mono_arch_opcode_supported (opcode))
5484 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5485 ins_iconst->inst_c0 = 1;
5486 ins_iconst->dreg = mono_alloc_ireg (cfg);
5487 MONO_ADD_INS (cfg->cbb, ins_iconst);
5489 MONO_INST_NEW (cfg, ins, opcode);
5490 ins->dreg = mono_alloc_ireg (cfg);
5491 ins->inst_basereg = args [0]->dreg;
5492 ins->inst_offset = 0;
5493 ins->sreg2 = ins_iconst->dreg;
5494 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5495 MONO_ADD_INS (cfg->cbb, ins);
5497 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5498 MonoInst *ins_iconst;
5501 if (fsig->params [0]->type == MONO_TYPE_I4) {
5502 opcode = OP_ATOMIC_ADD_NEW_I4;
5503 cfg->has_atomic_add_new_i4 = TRUE;
5505 #if SIZEOF_REGISTER == 8
5506 else if (fsig->params [0]->type == MONO_TYPE_I8)
5507 opcode = OP_ATOMIC_ADD_NEW_I8;
5510 if (!mono_arch_opcode_supported (opcode))
5512 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5513 ins_iconst->inst_c0 = -1;
5514 ins_iconst->dreg = mono_alloc_ireg (cfg);
5515 MONO_ADD_INS (cfg->cbb, ins_iconst);
5517 MONO_INST_NEW (cfg, ins, opcode);
5518 ins->dreg = mono_alloc_ireg (cfg);
5519 ins->inst_basereg = args [0]->dreg;
5520 ins->inst_offset = 0;
5521 ins->sreg2 = ins_iconst->dreg;
5522 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5523 MONO_ADD_INS (cfg->cbb, ins);
5525 } else if (strcmp (cmethod->name, "Add") == 0) {
5528 if (fsig->params [0]->type == MONO_TYPE_I4) {
5529 opcode = OP_ATOMIC_ADD_NEW_I4;
5530 cfg->has_atomic_add_new_i4 = TRUE;
5532 #if SIZEOF_REGISTER == 8
5533 else if (fsig->params [0]->type == MONO_TYPE_I8)
5534 opcode = OP_ATOMIC_ADD_NEW_I8;
5537 if (!mono_arch_opcode_supported (opcode))
5539 MONO_INST_NEW (cfg, ins, opcode);
5540 ins->dreg = mono_alloc_ireg (cfg);
5541 ins->inst_basereg = args [0]->dreg;
5542 ins->inst_offset = 0;
5543 ins->sreg2 = args [1]->dreg;
5544 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5545 MONO_ADD_INS (cfg->cbb, ins);
5548 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5550 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5551 if (strcmp (cmethod->name, "Exchange") == 0) {
5553 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5555 if (fsig->params [0]->type == MONO_TYPE_I4) {
5556 opcode = OP_ATOMIC_EXCHANGE_I4;
5557 cfg->has_atomic_exchange_i4 = TRUE;
5559 #if SIZEOF_REGISTER == 8
5560 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5561 (fsig->params [0]->type == MONO_TYPE_I))
5562 opcode = OP_ATOMIC_EXCHANGE_I8;
5564 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5565 opcode = OP_ATOMIC_EXCHANGE_I4;
5566 cfg->has_atomic_exchange_i4 = TRUE;
5572 if (!mono_arch_opcode_supported (opcode))
5575 MONO_INST_NEW (cfg, ins, opcode);
5576 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5577 ins->inst_basereg = args [0]->dreg;
5578 ins->inst_offset = 0;
5579 ins->sreg2 = args [1]->dreg;
5580 MONO_ADD_INS (cfg->cbb, ins);
5582 switch (fsig->params [0]->type) {
5584 ins->type = STACK_I4;
5588 ins->type = STACK_I8;
5590 case MONO_TYPE_OBJECT:
5591 ins->type = STACK_OBJ;
5594 g_assert_not_reached ();
5597 if (cfg->gen_write_barriers && is_ref)
5598 emit_write_barrier (cfg, args [0], args [1]);
5600 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5602 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5603 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5605 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5606 if (fsig->params [1]->type == MONO_TYPE_I4)
5608 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5609 size = sizeof (gpointer);
5610 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5613 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5615 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5616 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5617 ins->sreg1 = args [0]->dreg;
5618 ins->sreg2 = args [1]->dreg;
5619 ins->sreg3 = args [2]->dreg;
5620 ins->type = STACK_I4;
5621 MONO_ADD_INS (cfg->cbb, ins);
5622 cfg->has_atomic_cas_i4 = TRUE;
5623 } else if (size == 8) {
5624 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5626 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5627 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5628 ins->sreg1 = args [0]->dreg;
5629 ins->sreg2 = args [1]->dreg;
5630 ins->sreg3 = args [2]->dreg;
5631 ins->type = STACK_I8;
5632 MONO_ADD_INS (cfg->cbb, ins);
5634 /* g_assert_not_reached (); */
5636 if (cfg->gen_write_barriers && is_ref)
5637 emit_write_barrier (cfg, args [0], args [1]);
5639 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5641 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5642 ins = emit_memory_barrier (cfg, FullBarrier);
5646 } else if (cmethod->klass->image == mono_defaults.corlib) {
5647 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5648 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5649 if (should_insert_brekpoint (cfg->method)) {
5650 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5652 MONO_INST_NEW (cfg, ins, OP_NOP);
5653 MONO_ADD_INS (cfg->cbb, ins);
5657 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5658 && strcmp (cmethod->klass->name, "Environment") == 0) {
5660 EMIT_NEW_ICONST (cfg, ins, 1);
5662 EMIT_NEW_ICONST (cfg, ins, 0);
5666 } else if (cmethod->klass == mono_defaults.math_class) {
5668 * There is general branches code for Min/Max, but it does not work for
5670 * http://everything2.com/?node_id=1051618
5672 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5673 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5675 MonoJumpInfoToken *ji;
5678 cfg->disable_llvm = TRUE;
5680 if (args [0]->opcode == OP_GOT_ENTRY) {
5681 pi = args [0]->inst_p1;
5682 g_assert (pi->opcode == OP_PATCH_INFO);
5683 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5686 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5687 ji = args [0]->inst_p0;
5690 NULLIFY_INS (args [0]);
5693 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5694 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5695 ins->dreg = mono_alloc_ireg (cfg);
5697 ins->inst_p0 = mono_string_to_utf8 (s);
5698 MONO_ADD_INS (cfg->cbb, ins);
5703 #ifdef MONO_ARCH_SIMD_INTRINSICS
5704 if (cfg->opt & MONO_OPT_SIMD) {
5705 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5711 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5715 if (COMPILE_LLVM (cfg)) {
5716 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5721 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5725 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Give selected runtime-internal calls a specialized implementation before a
 *   normal managed call is emitted.  The only redirection visible here rewrites
 *   String.InternalAllocateStr into a direct call to the GC's managed string
 *   allocator, but only when allocation profiling is off and shared (generic
 *   AOT) code is not being produced.
 *   Returns the emitted call MonoInst*, or (presumably) NULL when no
 *   redirection applies — TODO confirm; the fall-through return is elided in
 *   this listing.
 * NOTE(review): the embedded line numbers are non-contiguous, so some
 * statements of the original function are not shown here.
 */
5728 inline static MonoInst*
5729 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5730 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5732 if (method->klass == mono_defaults.string_class) {
5733 /* managed string allocation support */
5734 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5735 MonoInst *iargs [2];
5736 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5737 MonoMethod *managed_alloc = NULL;
5739 g_assert (vtable); /* Should not fail since it is System.String */
5740 #ifndef MONO_CROSS_COMPILE
5741 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, requested-length) as arguments. */
5745 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5746 iargs [1] = args [0];
5747 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   When inlining, create an OP_LOCAL variable for each argument (including
 *   the implicit 'this' when SIG->hasthis) and store the corresponding stack
 *   value SP[i] into it, recording these vars in cfg->args so the inlined
 *   body's ldarg/starg opcodes resolve to them.
 */
5754 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5756 MonoInst *store, *temp;
5759 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For 'this' the static signature has no entry, so derive the type from the stack value. */
5760 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5763 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5764 * would be different than the MonoInst's used to represent arguments, and
5765 * the ldelema implementation can't deal with that.
5766 * Solution: When ldelema is used on an inline argument, create a var for
5767 * it, emit ldelema on that var, and emit the saving code below in
5768 * inline_method () if needed.
5770 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5771 cfg->args [i] = temp;
5772 /* This uses cfg->args [i] which is set by the preceding line */
5773 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5774 store->cil_code = sp [0]->cil_code;
/* Debugging aids: when enabled, inlining can be restricted by method name
 * through the MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT env vars. */
5779 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5780 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5782 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Returns TRUE when CALLED_METHOD's full name starts with the prefix given
 *   by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var (cached in a static
 *   on first use).  Used to bisect inlining-related bugs.
 */
5784 check_inline_called_method_name_limit (MonoMethod *called_method)
5787 static const char *limit = NULL;
5789 if (limit == NULL) {
5790 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5792 if (limit_string != NULL)
5793 limit = limit_string;
5798 if (limit [0] != '\0') {
5799 char *called_method_name = mono_method_full_name (called_method, TRUE);
5801 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5802 g_free (called_method_name);
5804 //return (strncmp_result <= 0);
5805 return (strncmp_result == 0);
5812 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Counterpart of check_inline_called_method_name_limit for the CALLER:
 *   returns TRUE when CALLER_METHOD's full name starts with the prefix from
 *   the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var (cached in a static).
 */
5814 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5817 static const char *limit = NULL;
5819 if (limit == NULL) {
5820 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5821 if (limit_string != NULL) {
5822 limit = limit_string;
5828 if (limit [0] != '\0') {
5829 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5831 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5832 g_free (caller_method_name);
5834 //return (strncmp_result <= 0);
5835 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes register DREG to the zero value appropriate for
 *   RTYPE: NULL for pointers/references, 0 for integer types, 0.0 for R4/R8
 *   (loaded through a shared static double), and VZERO for value types
 *   (including generic instances and VAR/MVAR type vars that are valuetypes
 *   under gsharedvt).
 */
5843 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Shared storage for the 0.0 constant referenced by OP_R8CONST below. */
5845 static double r8_0 = 0.0;
5849 rtype = mini_replace_type (rtype);
5853 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5854 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5855 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5856 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5857 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5858 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5859 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5860 ins->type = STACK_R8;
5861 ins->inst_p0 = (void*)&r8_0;
5863 MONO_ADD_INS (cfg->cbb, ins);
5864 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5865 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5866 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5867 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5868 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (object references etc.) is initialized with NULL. */
5870 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar, but emits OP_DUMMY_* initializations: these keep the
 *   IR well formed (every vreg has a def) without generating real machine
 *   code.  Falls back to a real emit_init_rvar for types with no dummy form.
 */
5875 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5879 rtype = mini_replace_type (rtype);
5883 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5884 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5885 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5886 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5887 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5888 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5889 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5890 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5891 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5892 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5893 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5894 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5896 emit_init_rvar (cfg, dreg, rtype);
5900 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize IL local LOCAL of type TYPE.  Under soft-float a fresh vreg is
 *   initialized first and then stored to the local through a LOCSTORE so the
 *   soft-float decomposition sees a proper store; otherwise the local's dreg
 *   is initialized directly (dummy-initialized when INIT is FALSE).
 */
5902 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5904 MonoInst *var = cfg->locals [local];
5905 if (COMPILE_SOFT_FLOAT (cfg)) {
5907 int reg = alloc_dreg (cfg, var->type);
5908 emit_init_rvar (cfg, reg, type);
5909 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5912 emit_init_rvar (cfg, var->dreg, type);
5914 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *   Try to inline CMETHOD into the current compile at the call site described
 *   by IP/REAL_OFFSET, with call arguments in SP.  The callee's IR is built by
 *   a recursive mono_method_to_ir () into a fresh start/end bblock pair; the
 *   inline is committed when the reported cost is acceptable (or INLINE_ALWAYS
 *   is set) and rolled back otherwise.
 *   Returns the cost on success (non-positive/failure paths are elided from
 *   this listing — TODO confirm exact return contract against full source).
 */
5919 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5920 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5922 MonoInst *ins, *rvar = NULL;
5923 MonoMethodHeader *cheader;
5924 MonoBasicBlock *ebblock, *sbblock;
5926 MonoMethod *prev_inlined_method;
5927 MonoInst **prev_locals, **prev_args;
5928 MonoType **prev_arg_types;
5929 guint prev_real_offset;
5930 GHashTable *prev_cbb_hash;
5931 MonoBasicBlock **prev_cil_offset_to_bb;
5932 MonoBasicBlock *prev_cbb;
5933 unsigned char* prev_cil_start;
5934 guint32 prev_cil_offset_to_bb_len;
5935 MonoMethod *prev_current_method;
5936 MonoGenericContext *prev_generic_context;
5937 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5939 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based name filters, for debugging inliner problems. */
5941 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5942 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5945 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5946 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5950 if (cfg->verbose_level > 2)
5951 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5953 if (!cmethod->inline_info) {
5954 cfg->stat_inlineable_methods++;
5955 cmethod->inline_info = 1;
5958 /* allocate local variables */
5959 cheader = mono_method_get_header (cmethod);
5961 if (cheader == NULL || mono_loader_get_last_error ()) {
5962 MonoLoaderError *error = mono_loader_get_last_error ();
5965 mono_metadata_free_mh (cheader);
5966 if (inline_always && error)
5967 mono_cfg_set_exception (cfg, error->exception_type);
5969 mono_loader_clear_error ();
5973 /*Must verify before creating locals as it can cause the JIT to assert.*/
5974 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5975 mono_metadata_free_mh (cheader);
5979 /* allocate space to store the return value */
5980 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5981 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's array is restored below. */
5984 prev_locals = cfg->locals;
5985 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5986 for (i = 0; i < cheader->num_locals; ++i)
5987 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5989 /* allocate start and end blocks */
5990 /* This is needed so if the inline is aborted, we can clean up */
5991 NEW_BBLOCK (cfg, sbblock);
5992 sbblock->real_offset = real_offset;
5994 NEW_BBLOCK (cfg, ebblock);
5995 ebblock->block_num = cfg->num_bblocks++;
5996 ebblock->real_offset = real_offset;
/* Save all per-method compile state that the recursive IR build overwrites. */
5998 prev_args = cfg->args;
5999 prev_arg_types = cfg->arg_types;
6000 prev_inlined_method = cfg->inlined_method;
6001 cfg->inlined_method = cmethod;
6002 cfg->ret_var_set = FALSE;
6003 cfg->inline_depth ++;
6004 prev_real_offset = cfg->real_offset;
6005 prev_cbb_hash = cfg->cbb_hash;
6006 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6007 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6008 prev_cil_start = cfg->cil_start;
6009 prev_cbb = cfg->cbb;
6010 prev_current_method = cfg->current_method;
6011 prev_generic_context = cfg->generic_context;
6012 prev_ret_var_set = cfg->ret_var_set;
6014 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL; costs < 0 signals failure. */
6017 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
6019 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state saved above. */
6021 cfg->inlined_method = prev_inlined_method;
6022 cfg->real_offset = prev_real_offset;
6023 cfg->cbb_hash = prev_cbb_hash;
6024 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6025 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6026 cfg->cil_start = prev_cil_start;
6027 cfg->locals = prev_locals;
6028 cfg->args = prev_args;
6029 cfg->arg_types = prev_arg_types;
6030 cfg->current_method = prev_current_method;
6031 cfg->generic_context = prev_generic_context;
6032 cfg->ret_var_set = prev_ret_var_set;
6033 cfg->inline_depth --;
/* Success path: commit the inline when cheap enough or forced. */
6035 if ((costs >= 0 && costs < 60) || inline_always) {
6036 if (cfg->verbose_level > 2)
6037 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6039 cfg->stat_inlined_methods++;
6041 /* always add some code to avoid block split failures */
6042 MONO_INST_NEW (cfg, ins, OP_NOP);
6043 MONO_ADD_INS (prev_cbb, ins);
6045 prev_cbb->next_bb = sbblock;
6046 link_bblock (cfg, prev_cbb, sbblock);
6049 * Get rid of the begin and end bblocks if possible to aid local
6052 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6054 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6055 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6057 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6058 MonoBasicBlock *prev = ebblock->in_bb [0];
6059 mono_merge_basic_blocks (cfg, prev, ebblock);
6061 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6062 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6063 cfg->cbb = prev_cbb;
6067 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessor edges that never stored a return value. */
6073 for (i = 0; i < ebblock->in_count; ++i) {
6074 bb = ebblock->in_bb [i];
6076 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6079 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6089 * If the inlined method contains only a throw, then the ret var is not
6090 * set, so set it to a dummy value.
6093 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6095 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6098 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Failure path: undo the inline attempt and clear any pending exception. */
6101 if (cfg->verbose_level > 2)
6102 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6103 cfg->exception_type = MONO_EXCEPTION_NONE;
6104 mono_loader_clear_error ();
6106 /* This gets rid of the newly added bblocks */
6107 cfg->cbb = prev_cbb;
6109 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6114 * Some of these comments may well be out-of-date.
6115 * Design decisions: we do a single pass over the IL code (and we do bblock
6116 * splitting/merging in the few cases when it's required: a back jump to an IL
6117 * address that was not already seen as bblock starting point).
6118 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6119 * Complex operations are decomposed in simpler ones right away. We need to let the
6120 * arch-specific code peek and poke inside this process somehow (except when the
6121 * optimizations can take advantage of the full semantic info of coarse opcodes).
6122 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6123 * MonoInst->opcode initially is the IL opcode or some simplification of that
6124 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6125 * opcode with value bigger than OP_LAST.
6126 * At this point the IR can be handed over to an interpreter, a dumb code generator
6127 * or to the optimizing code generator that will translate it to SSA form.
6129 * Profiling directed optimizations.
6130 * We may compile by default with few or no optimizations and instrument the code
6131 * or the user may indicate what methods to optimize the most either in a config file
6132 * or through repeated runs where the compiler applies offline the optimizations to
6133 * each method and then decides if it was worth it.
/* Lightweight IL validation helpers used throughout mono_method_to_ir ():
 * each jumps to the UNVERIFIED/LOAD_ERROR handling when its check fails. */
6136 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6137 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6138 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6139 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6140 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6141 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6142 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6143 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
6145 /* offset from br.s -> br like opcodes */
6146 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Returns TRUE when the IL address IP still belongs to basic block BB, i.e.
 *   no other basic block starts at that offset in cfg->cil_offset_to_bb.
 */
6149 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6151 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6153 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL stream [START, END): decode each opcode, compute
 *   branch targets, and create basic blocks (via GET_BBLOCK) at every branch
 *   target and fall-through point.  Blocks that end in a throw are flagged
 *   out_of_line so later passes can move them off the hot path.
 */
6157 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6159 unsigned char *ip = start;
6160 unsigned char *target;
6163 MonoBasicBlock *bblock;
6164 const MonoOpcode *opcode;
6167 cli_addr = ip - start;
6168 i = mono_opcode_value ((const guint8 **)&ip, end);
6171 opcode = &mono_opcodes [i];
/* Advance IP by the operand size for each operand encoding class. */
6172 switch (opcode->argument) {
6173 case MonoInlineNone:
6176 case MonoInlineString:
6177 case MonoInlineType:
6178 case MonoInlineField:
6179 case MonoInlineMethod:
6182 case MonoShortInlineR:
6189 case MonoShortInlineVar:
6190 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next opcode. */
6193 case MonoShortInlineBrTarget:
6194 target = start + cli_addr + 2 + (signed char)ip [1];
6195 GET_BBLOCK (cfg, bblock, target);
6198 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement relative to the next opcode. */
6200 case MonoInlineBrTarget:
6201 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6202 GET_BBLOCK (cfg, bblock, target);
6205 GET_BBLOCK (cfg, bblock, ip);
/* switch: N targets, each relative to the first instruction after the table. */
6207 case MonoInlineSwitch: {
6208 guint32 n = read32 (ip + 1);
6211 cli_addr += 5 + 4 * n;
6212 target = start + cli_addr;
6213 GET_BBLOCK (cfg, bblock, target);
6215 for (j = 0; j < n; ++j) {
6216 target = start + cli_addr + (gint32)read32 (ip);
6217 GET_BBLOCK (cfg, bblock, target);
6227 g_assert_not_reached ();
6230 if (i == CEE_THROW) {
6231 unsigned char *bb_start = ip - 1;
6233 /* Find the start of the bblock containing the throw */
6235 while ((bb_start >= start) && !bblock) {
6236 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6240 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 *   methods the token indexes the wrapper's data table (and the result is
 *   inflated with CONTEXT when needed); otherwise it is a normal metadata
 *   lookup.  Open constructed types are allowed (contrast mini_get_method).
 */
6250 static inline MonoMethod *
6251 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6255 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6256 method = mono_method_get_wrapper_data (m, token);
6258 method = mono_class_inflate_generic_method (method, context);
6260 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, a method on an open constructed type is rejected (the elided
 *   branch presumably NULLs/flags it — TODO confirm against full source).
 */
6266 static inline MonoMethod *
6267 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6269 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6271 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper methods
 *   look the class up in their wrapper data (inflating with CONTEXT when
 *   needed), normal methods go through the metadata.  The class is
 *   initialized before being returned.
 */
6277 static inline MonoClass*
6278 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6282 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6283 klass = mono_method_get_wrapper_data (method, token);
6285 klass = mono_class_inflate_generic_class (klass, context);
6287 klass = mono_class_get_full (method->klass->image, token, context);
6290 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature: wrapper methods fetch it from
 *   their wrapper data and inflate it with CONTEXT (asserting success),
 *   normal methods parse it from metadata.
 */
6294 static inline MonoMethodSignature*
6295 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6297 MonoMethodSignature *fsig;
6299 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6302 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6304 fsig = mono_inflate_generic_signature (fsig, context, &error);
6306 g_assert (mono_error_ok (&error));
6309 fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * check_linkdemand:
 *   Evaluate CAS link demands when CALLER calls CALLEE.  For an ECMA "full
 *   trust" link demand, code throwing a SecurityException is emitted before
 *   the call; other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND on the
 *   compile (without clobbering an earlier exception).
 */
6315 * Returns TRUE if the JIT should abort inlining because "callee"
6316 * is influenced by security attributes.
6319 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Skip the check while inlining (cfg->method != caller handled elsewhere). */
6323 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6327 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6328 if (result == MONO_JIT_SECURITY_OK)
6331 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6332 /* Generate code to throw a SecurityException before the actual call/link */
6333 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6336 NEW_ICONST (cfg, args [0], 4);
6337 NEW_METHODCONST (cfg, args [1], caller);
6338 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6339 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6340 /* don't hide previous results */
6341 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6342 cfg->exception_data = result;
/*
 * throw_exception:
 *   Returns (lazily resolving into a static cache) the managed
 *   SecurityManager.ThrowException(Exception) method used by
 *   emit_throw_exception below.
 */
6350 throw_exception (void)
6352 static MonoMethod *method = NULL;
6355 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6356 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException passing the preallocated
 *   exception object EX as a raw pointer constant.
 */
6363 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6365 MonoMethod *thrower = throw_exception ();
6368 EMIT_NEW_PCONST (cfg, args [0], ex);
6369 mono_emit_method_call (cfg, thrower, args, NULL);
6373 * Return the original method is a wrapper is specified. We can only access
6374 * the custom attributes from the original method.
/* Used by the CoreCLR security checks below: wrappers carry no attributes,
 * so map back to the wrapped method (except native-to-managed wrappers,
 * which are allowed to call anything). */
6377 get_original_method (MonoMethod *method)
6379 if (method->wrapper_type == MONO_WRAPPER_NONE)
6382 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6383 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6386 /* in other cases we need to find the original method */
6387 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security: if CALLER may not access FIELD, emit code that throws
 *   the returned security exception at the access site.
 */
6391 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6392 MonoBasicBlock *bblock, unsigned char *ip)
6394 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6395 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6397 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security: if CALLER may not call CALLEE, emit code that throws
 *   the returned security exception at the call site.
 */
6401 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6402 MonoBasicBlock *bblock, unsigned char *ip)
6404 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6405 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6407 emit_throw_exception (cfg, ex);
6411 * Check that the IL instructions at ip are the array initialization
6412 * sequence and return the pointer to the data and the size.
/* Pattern-matches the "dup; ldtoken <field>; call RuntimeHelpers::InitializeArray"
 * idiom that follows newarr, so the JIT can fold the initialization into a
 * direct copy from the field's RVA data.  Returns the data pointer (or, for
 * AOT, the RVA wrapped with GUINT_TO_POINTER) and reports size/field token
 * through the out parameters; bails out (elided returns) on any mismatch. */
6415 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6418 * newarr[System.Int32]
6420 * ldtoken field valuetype ...
6421 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (0x04 = Field). */
6423 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6424 guint32 token = read32 (ip + 7);
6425 guint32 field_token = read32 (ip + 2);
6426 guint32 field_index = field_token & 0xffffff;
6428 const char *data_ptr;
6430 MonoMethod *cmethod;
6431 MonoClass *dummy_class;
6432 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6438 *out_field_token = field_token;
6440 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
6443 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types with a fixed, endian-safe layout are folded. */
6445 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6446 case MONO_TYPE_BOOLEAN:
6450 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6451 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6452 case MONO_TYPE_CHAR:
/* Reject when the computed blob size exceeds the field's declared size. */
6469 if (size > mono_type_size (field->type, &dummy_align))
6472 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6473 if (!method->klass->image->dynamic) {
6474 field_index = read32 (ip + 2) & 0xffffff;
6475 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6476 data_ptr = mono_image_rva_map (method->klass->image, rva);
6477 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6478 /* for aot code we do the lookup on load */
6479 if (aot && data_ptr)
6480 return GUINT_TO_POINTER (rva);
6482 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6484 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG with a message naming METHOD and
 *   disassembling the offending instruction at IP (or noting an empty body).
 *   The header is queued on headers_to_free rather than freed immediately.
 */
6492 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6494 char *method_fname = mono_method_full_name (method, TRUE);
6496 MonoMethodHeader *header = mono_method_get_header (method);
6498 if (header->code_size == 0)
6499 method_code = g_strdup ("method body is empty.");
6501 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6502 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6503 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6504 g_free (method_fname);
6505 g_free (method_code);
6506 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Abort the compile with a pre-built managed exception object; the object
 *   is GC-registered as a root since it lives in cfg->exception_ptr.
 */
6510 set_exception_object (MonoCompile *cfg, MonoException *exception)
6512 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6513 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6514 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store of the top-of-stack value SP[0] into IL local N.  When the
 *   value is a fresh constant that would only be reg-reg moved, retarget the
 *   constant's dreg to the local instead of emitting a LOCSTORE.
 */
6518 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6521 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6522 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6523 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6524 /* Optimize reg-reg moves away */
6526 * Can't optimize other opcodes, since sp[0] might point to
6527 * the last ins of a decomposed opcode.
6529 sp [0]->dreg = (cfg)->locals [n]->dreg;
6531 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6536 * ldloca inhibits many optimizations so try to get rid of it in common
/* Recognizes "ldloca <n>; initobj <type>" and replaces it with a direct
 * zero-initialization of the local, returning the advanced IP (elided in
 * this listing — TODO confirm return value against full source). */
6539 static inline unsigned char *
6540 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6550 local = read16 (ip + 2);
6554 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6555 /* From the INITOBJ case */
6556 token = read32 (ip + 2);
6557 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6558 CHECK_TYPELOAD (klass);
6559 type = mini_replace_type (&klass->byval_arg);
6560 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *   Walk CLASS's parent chain and return TRUE when System.Exception is found.
 */
6568 is_exception_class (MonoClass *class)
6571 if (class == mono_defaults.exception_class)
6573 class = class->parent;
6579 * is_jit_optimizer_disabled:
6581 * Determine whenever M's assembly has a DebuggableAttribute with the
6582 * IsJITOptimizerDisabled flag set.
6585 is_jit_optimizer_disabled (MonoMethod *m)
6587 MonoAssembly *ass = m->klass->image->assembly;
6588 MonoCustomAttrInfo* attrs;
/* Cache of the resolved DebuggableAttribute class, shared across calls. */
6589 static MonoClass *klass;
6591 gboolean val = FALSE;
/* Fast path: result is memoized per-assembly behind a memory barrier. */
6594 if (ass->jit_optimizer_disabled_inited)
6595 return ass->jit_optimizer_disabled;
6598 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Attribute class missing: record FALSE and publish with a barrier. */
6601 ass->jit_optimizer_disabled = FALSE;
6602 mono_memory_barrier ();
6603 ass->jit_optimizer_disabled_inited = TRUE;
6607 attrs = mono_custom_attrs_from_assembly (ass);
6609 for (i = 0; i < attrs->num_attrs; ++i) {
6610 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6613 MonoMethodSignature *sig;
6615 if (!attr->ctor || attr->ctor->klass != klass)
6617 /* Decode the attribute. See reflection.c */
6618 len = attr->data_size;
6619 p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog. */
6620 g_assert (read16 (p) == 0x0001);
6623 // FIXME: Support named parameters
6624 sig = mono_method_signature (attr->ctor);
6625 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6627 /* Two boolean arguments */
6631 mono_custom_attrs_free (attrs);
6634 ass->jit_optimizer_disabled = val;
6635 mono_memory_barrier ();
6636 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether the call METHOD -> CMETHOD (signature FSIG, opcode
 *   CALL_OPCODE) may be compiled as a tail call: the architecture must accept
 *   the signature pair, no argument may alias the caller's stack (byref/
 *   pointer/fnptr args, valuetype 'this'), and pinvokes, lmf-saving callers,
 *   most wrappers and non-CEE_CALL opcodes are excluded.
 */
6642 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6644 gboolean supported_tail_call;
6647 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6648 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* Fallback when the arch has no OP_TAIL_CALL: signatures must match exactly
 * and the return value must not be a struct. */
6650 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6653 for (i = 0; i < fsig->param_count; ++i) {
6654 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6655 /* These can point to the current method's stack */
6656 supported_tail_call = FALSE;
6658 if (fsig->hasthis && cmethod->klass->valuetype)
6659 /* this might point to the current method's stack */
6660 supported_tail_call = FALSE;
6661 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6662 supported_tail_call = FALSE;
6663 if (cfg->method->save_lmf)
6664 supported_tail_call = FALSE;
6665 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6666 supported_tail_call = FALSE;
6667 if (call_opcode != CEE_CALL)
6668 supported_tail_call = FALSE;
6670 /* Debugging support */
6672 if (supported_tail_call) {
6673 if (!mono_debug_count ())
6674 supported_tail_call = FALSE;
6678 return supported_tail_call;
6681 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6682 * it to the thread local value based on the tls_offset field. Every other kind of access to
6683 * the field causes an assert.
/* Returns TRUE only for the corlib ThreadLocal`1.tlsdata field. */
6686 is_magic_tls_access (MonoClassField *field)
6688 if (strcmp (field->name, "tlsdata"))
6690 if (strcmp (field->parent->name, "ThreadLocal`1"))
6692 return field->parent->image == mono_defaults.corlib;
6695 /* emits the code needed to access a managed tls var (like ThreadStatic)
6696 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6697 * pointer for the current thread.
6698 * Returns the MonoInst* representing the address of the tls var.
6701 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6704 int static_data_reg, array_reg, dreg;
6705 int offset2_reg, idx_reg;
6706 // inlined access to the tls data
6707 // idx = (offset >> 24) - 1;
6708 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6709 static_data_reg = alloc_ireg (cfg);
6710 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6711 idx_reg = alloc_ireg (cfg);
6712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6713 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6714 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6715 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6716 array_reg = alloc_ireg (cfg);
6717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6718 offset2_reg = alloc_ireg (cfg);
6719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6720 dreg = alloc_ireg (cfg);
6721 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6726 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6727 * this address is cached per-method in cached_tls_addr.
6730 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6732 MonoInst *load, *addr, *temp, *store, *thread_ins;
6733 MonoClassField *offset_field;
6735 if (*cached_tls_addr) {
6736 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6739 thread_ins = mono_get_thread_intrinsic (cfg);
6740 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6742 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6744 MONO_ADD_INS (cfg->cbb, thread_ins);
6746 MonoMethod *thread_method;
6747 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6748 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6750 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6751 addr->klass = mono_class_from_mono_type (tls_field->type);
6752 addr->type = STACK_MP;
6753 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6754 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6756 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6761 * mono_method_to_ir:
6763 * Translate the .net IL into linear IR.
6766 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6767 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6768 guint inline_offset, gboolean is_virtual_call)
6771 MonoInst *ins, **sp, **stack_start;
6772 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6773 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6774 MonoMethod *cmethod, *method_definition;
6775 MonoInst **arg_array;
6776 MonoMethodHeader *header;
6778 guint32 token, ins_flag;
6780 MonoClass *constrained_call = NULL;
6781 unsigned char *ip, *end, *target, *err_pos;
6782 MonoMethodSignature *sig;
6783 MonoGenericContext *generic_context = NULL;
6784 MonoGenericContainer *generic_container = NULL;
6785 MonoType **param_types;
6786 int i, n, start_new_bblock, dreg;
6787 int num_calls = 0, inline_costs = 0;
6788 int breakpoint_id = 0;
6790 MonoBoolean security, pinvoke;
6791 MonoSecurityManager* secman = NULL;
6792 MonoDeclSecurityActions actions;
6793 GSList *class_inits = NULL;
6794 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6796 gboolean init_locals, seq_points, skip_dead_blocks;
6797 gboolean disable_inline, sym_seq_points = FALSE;
6798 MonoInst *cached_tls_addr = NULL;
6799 MonoDebugMethodInfo *minfo;
6800 MonoBitSet *seq_point_locs = NULL;
6801 MonoBitSet *seq_point_set_locs = NULL;
6803 disable_inline = is_jit_optimizer_disabled (method);
6805 /* serialization and xdomain stuff may need access to private fields and methods */
6806 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6807 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6808 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6809 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6810 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6811 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6813 dont_verify |= mono_security_smcs_hack_enabled ();
6815 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6816 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6817 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6818 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6819 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6821 image = method->klass->image;
6822 header = mono_method_get_header (method);
6824 MonoLoaderError *error;
6826 if ((error = mono_loader_get_last_error ())) {
6827 mono_cfg_set_exception (cfg, error->exception_type);
6829 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6830 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6832 goto exception_exit;
6834 generic_container = mono_method_get_generic_container (method);
6835 sig = mono_method_signature (method);
6836 num_args = sig->hasthis + sig->param_count;
6837 ip = (unsigned char*)header->code;
6838 cfg->cil_start = ip;
6839 end = ip + header->code_size;
6840 cfg->stat_cil_code_size += header->code_size;
6842 seq_points = cfg->gen_seq_points && cfg->method == method;
6843 #ifdef PLATFORM_ANDROID
6844 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6847 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6848 /* We could hit a seq point before attaching to the JIT (#8338) */
6852 if (cfg->gen_seq_points && cfg->method == method) {
6853 minfo = mono_debug_lookup_method (method);
6855 int i, n_il_offsets;
6859 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6860 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6861 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6862 sym_seq_points = TRUE;
6863 for (i = 0; i < n_il_offsets; ++i) {
6864 if (il_offsets [i] < header->code_size)
6865 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6867 g_free (il_offsets);
6868 g_free (line_numbers);
6873 * Methods without init_locals set could cause asserts in various passes
6874 * (#497220). To work around this, we emit dummy initialization opcodes
6875 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6876 * on some platforms.
6878 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6879 init_locals = header->init_locals;
6883 method_definition = method;
6884 while (method_definition->is_inflated) {
6885 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6886 method_definition = imethod->declaring;
6889 /* SkipVerification is not allowed if core-clr is enabled */
6890 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6892 dont_verify_stloc = TRUE;
6895 if (sig->is_inflated)
6896 generic_context = mono_method_get_context (method);
6897 else if (generic_container)
6898 generic_context = &generic_container->context;
6899 cfg->generic_context = generic_context;
6901 if (!cfg->generic_sharing_context)
6902 g_assert (!sig->has_type_parameters);
6904 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6905 g_assert (method->is_inflated);
6906 g_assert (mono_method_get_context (method)->method_inst);
6908 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6909 g_assert (sig->generic_param_count);
6911 if (cfg->method == method) {
6912 cfg->real_offset = 0;
6914 cfg->real_offset = inline_offset;
6917 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6918 cfg->cil_offset_to_bb_len = header->code_size;
6920 cfg->current_method = method;
6922 if (cfg->verbose_level > 2)
6923 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6925 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6927 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6928 for (n = 0; n < sig->param_count; ++n)
6929 param_types [n + sig->hasthis] = sig->params [n];
6930 cfg->arg_types = param_types;
6932 dont_inline = g_list_prepend (dont_inline, method);
6933 if (cfg->method == method) {
6935 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6936 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6939 NEW_BBLOCK (cfg, start_bblock);
6940 cfg->bb_entry = start_bblock;
6941 start_bblock->cil_code = NULL;
6942 start_bblock->cil_length = 0;
6943 #if defined(__native_client_codegen__)
6944 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6945 ins->dreg = alloc_dreg (cfg, STACK_I4);
6946 MONO_ADD_INS (start_bblock, ins);
6950 NEW_BBLOCK (cfg, end_bblock);
6951 cfg->bb_exit = end_bblock;
6952 end_bblock->cil_code = NULL;
6953 end_bblock->cil_length = 0;
6954 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6955 g_assert (cfg->num_bblocks == 2);
6957 arg_array = cfg->args;
6959 if (header->num_clauses) {
6960 cfg->spvars = g_hash_table_new (NULL, NULL);
6961 cfg->exvars = g_hash_table_new (NULL, NULL);
6963 /* handle exception clauses */
6964 for (i = 0; i < header->num_clauses; ++i) {
6965 MonoBasicBlock *try_bb;
6966 MonoExceptionClause *clause = &header->clauses [i];
6967 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6968 try_bb->real_offset = clause->try_offset;
6969 try_bb->try_start = TRUE;
6970 try_bb->region = ((i + 1) << 8) | clause->flags;
6971 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6972 tblock->real_offset = clause->handler_offset;
6973 tblock->flags |= BB_EXCEPTION_HANDLER;
6976 * Linking the try block with the EH block hinders inlining as we won't be able to
6977 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6979 if (COMPILE_LLVM (cfg))
6980 link_bblock (cfg, try_bb, tblock);
6982 if (*(ip + clause->handler_offset) == CEE_POP)
6983 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6985 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6986 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6987 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6988 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6989 MONO_ADD_INS (tblock, ins);
6991 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6992 /* finally clauses already have a seq point */
6993 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6994 MONO_ADD_INS (tblock, ins);
6997 /* todo: is a fault block unsafe to optimize? */
6998 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6999 tblock->flags |= BB_EXCEPTION_UNSAFE;
7003 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7005 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7007 /* catch and filter blocks get the exception object on the stack */
7008 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7009 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7010 MonoInst *dummy_use;
7012 /* mostly like handle_stack_args (), but just sets the input args */
7013 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7014 tblock->in_scount = 1;
7015 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7016 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7019 * Add a dummy use for the exvar so its liveness info will be
7023 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7025 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7026 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7027 tblock->flags |= BB_EXCEPTION_HANDLER;
7028 tblock->real_offset = clause->data.filter_offset;
7029 tblock->in_scount = 1;
7030 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7031 /* The filter block shares the exvar with the handler block */
7032 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7033 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7034 MONO_ADD_INS (tblock, ins);
7038 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7039 clause->data.catch_class &&
7040 cfg->generic_sharing_context &&
7041 mono_class_check_context_used (clause->data.catch_class)) {
7043 * In shared generic code with catch
7044 * clauses containing type variables
7045 * the exception handling code has to
7046 * be able to get to the rgctx.
7047 * Therefore we have to make sure that
7048 * the vtable/mrgctx argument (for
7049 * static or generic methods) or the
7050 * "this" argument (for non-static
7051 * methods) are live.
7053 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7054 mini_method_get_context (method)->method_inst ||
7055 method->klass->valuetype) {
7056 mono_get_vtable_var (cfg);
7058 MonoInst *dummy_use;
7060 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7065 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7066 cfg->cbb = start_bblock;
7067 cfg->args = arg_array;
7068 mono_save_args (cfg, sig, inline_args);
7071 /* FIRST CODE BLOCK */
7072 NEW_BBLOCK (cfg, bblock);
7073 bblock->cil_code = ip;
7077 ADD_BBLOCK (cfg, bblock);
7079 if (cfg->method == method) {
7080 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7081 if (breakpoint_id) {
7082 MONO_INST_NEW (cfg, ins, OP_BREAK);
7083 MONO_ADD_INS (bblock, ins);
7087 if (mono_security_cas_enabled ())
7088 secman = mono_security_manager_get_methods ();
7090 security = (secman && mono_security_method_has_declsec (method));
7091 /* at this point having security doesn't mean we have any code to generate */
7092 if (security && (cfg->method == method)) {
7093 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7094 * And we do not want to enter the next section (with allocation) if we
7095 * have nothing to generate */
7096 security = mono_declsec_get_demands (method, &actions);
7099 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7100 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7102 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7103 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7104 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7106 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7107 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7111 mono_custom_attrs_free (custom);
7114 custom = mono_custom_attrs_from_class (wrapped->klass);
7115 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7119 mono_custom_attrs_free (custom);
7122 /* not a P/Invoke after all */
7127 /* we use a separate basic block for the initialization code */
7128 NEW_BBLOCK (cfg, init_localsbb);
7129 cfg->bb_init = init_localsbb;
7130 init_localsbb->real_offset = cfg->real_offset;
7131 start_bblock->next_bb = init_localsbb;
7132 init_localsbb->next_bb = bblock;
7133 link_bblock (cfg, start_bblock, init_localsbb);
7134 link_bblock (cfg, init_localsbb, bblock);
7136 cfg->cbb = init_localsbb;
7138 if (cfg->gsharedvt && cfg->method == method) {
7139 MonoGSharedVtMethodInfo *info;
7140 MonoInst *var, *locals_var;
7143 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7144 info->method = cfg->method;
7145 info->count_entries = 16;
7146 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7147 cfg->gsharedvt_info = info;
7149 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7150 /* prevent it from being register allocated */
7151 //var->flags |= MONO_INST_VOLATILE;
7152 cfg->gsharedvt_info_var = var;
7154 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7155 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7157 /* Allocate locals */
7158 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7159 /* prevent it from being register allocated */
7160 //locals_var->flags |= MONO_INST_VOLATILE;
7161 cfg->gsharedvt_locals_var = locals_var;
7163 dreg = alloc_ireg (cfg);
7164 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7166 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7167 ins->dreg = locals_var->dreg;
7169 MONO_ADD_INS (cfg->cbb, ins);
7170 cfg->gsharedvt_locals_var_ins = ins;
7172 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7175 ins->flags |= MONO_INST_INIT;
7179 /* at this point we know, if security is TRUE, that some code needs to be generated */
7180 if (security && (cfg->method == method)) {
7183 cfg->stat_cas_demand_generation++;
7185 if (actions.demand.blob) {
7186 /* Add code for SecurityAction.Demand */
7187 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7188 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7189 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7190 mono_emit_method_call (cfg, secman->demand, args, NULL);
7192 if (actions.noncasdemand.blob) {
7193 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7194 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7195 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7196 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7197 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7198 mono_emit_method_call (cfg, secman->demand, args, NULL);
7200 if (actions.demandchoice.blob) {
7201 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7202 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7203 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7204 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7205 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7209 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7211 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7214 if (mono_security_core_clr_enabled ()) {
7215 /* check if this is native code, e.g. an icall or a p/invoke */
7216 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7217 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7219 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7220 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7222 /* if this ia a native call then it can only be JITted from platform code */
7223 if ((icall || pinvk) && method->klass && method->klass->image) {
7224 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7225 MonoException *ex = icall ? mono_get_exception_security () :
7226 mono_get_exception_method_access ();
7227 emit_throw_exception (cfg, ex);
7234 CHECK_CFG_EXCEPTION;
7236 if (header->code_size == 0)
7239 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7244 if (cfg->method == method)
7245 mono_debug_init_method (cfg, bblock, breakpoint_id);
7247 for (n = 0; n < header->num_locals; ++n) {
7248 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7253 /* We force the vtable variable here for all shared methods
7254 for the possibility that they might show up in a stack
7255 trace where their exact instantiation is needed. */
7256 if (cfg->generic_sharing_context && method == cfg->method) {
7257 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7258 mini_method_get_context (method)->method_inst ||
7259 method->klass->valuetype) {
7260 mono_get_vtable_var (cfg);
7262 /* FIXME: Is there a better way to do this?
7263 We need the variable live for the duration
7264 of the whole method. */
7265 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7269 /* add a check for this != NULL to inlined methods */
7270 if (is_virtual_call) {
7273 NEW_ARGLOAD (cfg, arg_ins, 0);
7274 MONO_ADD_INS (cfg->cbb, arg_ins);
7275 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7278 skip_dead_blocks = !dont_verify;
7279 if (skip_dead_blocks) {
7280 original_bb = bb = mono_basic_block_split (method, &error);
7281 if (!mono_error_ok (&error)) {
7282 mono_error_cleanup (&error);
7288 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7289 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7292 start_new_bblock = 0;
7295 if (cfg->method == method)
7296 cfg->real_offset = ip - header->code;
7298 cfg->real_offset = inline_offset;
7303 if (start_new_bblock) {
7304 bblock->cil_length = ip - bblock->cil_code;
7305 if (start_new_bblock == 2) {
7306 g_assert (ip == tblock->cil_code);
7308 GET_BBLOCK (cfg, tblock, ip);
7310 bblock->next_bb = tblock;
7313 start_new_bblock = 0;
7314 for (i = 0; i < bblock->in_scount; ++i) {
7315 if (cfg->verbose_level > 3)
7316 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7317 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7321 g_slist_free (class_inits);
7324 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7325 link_bblock (cfg, bblock, tblock);
7326 if (sp != stack_start) {
7327 handle_stack_args (cfg, stack_start, sp - stack_start);
7329 CHECK_UNVERIFIABLE (cfg);
7331 bblock->next_bb = tblock;
7334 for (i = 0; i < bblock->in_scount; ++i) {
7335 if (cfg->verbose_level > 3)
7336 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7337 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7340 g_slist_free (class_inits);
7345 if (skip_dead_blocks) {
7346 int ip_offset = ip - header->code;
7348 if (ip_offset == bb->end)
7352 int op_size = mono_opcode_size (ip, end);
7353 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7355 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7357 if (ip_offset + op_size == bb->end) {
7358 MONO_INST_NEW (cfg, ins, OP_NOP);
7359 MONO_ADD_INS (bblock, ins);
7360 start_new_bblock = 1;
7368 * Sequence points are points where the debugger can place a breakpoint.
7369 * Currently, we generate these automatically at points where the IL
7372 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7374 * Make methods interruptable at the beginning, and at the targets of
7375 * backward branches.
7376 * Also, do this at the start of every bblock in methods with clauses too,
7377 * to be able to handle instructions with inprecise control flow like
7379 * Backward branches are handled at the end of method-to-ir ().
7381 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7383 /* Avoid sequence points on empty IL like .volatile */
7384 // FIXME: Enable this
7385 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7386 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7387 if (sp != stack_start)
7388 ins->flags |= MONO_INST_NONEMPTY_STACK;
7389 MONO_ADD_INS (cfg->cbb, ins);
7392 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7395 bblock->real_offset = cfg->real_offset;
7397 if ((cfg->method == method) && cfg->coverage_info) {
7398 guint32 cil_offset = ip - header->code;
7399 cfg->coverage_info->data [cil_offset].cil_code = ip;
7401 /* TODO: Use an increment here */
7402 #if defined(TARGET_X86)
7403 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7404 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7406 MONO_ADD_INS (cfg->cbb, ins);
7408 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7409 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7413 if (cfg->verbose_level > 3)
7414 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7418 if (seq_points && !sym_seq_points && sp != stack_start) {
7420 * The C# compiler uses these nops to notify the JIT that it should
7421 * insert seq points.
7423 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7424 MONO_ADD_INS (cfg->cbb, ins);
7426 if (cfg->keep_cil_nops)
7427 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7429 MONO_INST_NEW (cfg, ins, OP_NOP);
7431 MONO_ADD_INS (bblock, ins);
7434 if (should_insert_brekpoint (cfg->method)) {
7435 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7437 MONO_INST_NEW (cfg, ins, OP_NOP);
7440 MONO_ADD_INS (bblock, ins);
7446 CHECK_STACK_OVF (1);
7447 n = (*ip)-CEE_LDARG_0;
7449 EMIT_NEW_ARGLOAD (cfg, ins, n);
7457 CHECK_STACK_OVF (1);
7458 n = (*ip)-CEE_LDLOC_0;
7460 EMIT_NEW_LOCLOAD (cfg, ins, n);
7469 n = (*ip)-CEE_STLOC_0;
7472 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7474 emit_stloc_ir (cfg, sp, header, n);
7481 CHECK_STACK_OVF (1);
7484 EMIT_NEW_ARGLOAD (cfg, ins, n);
7490 CHECK_STACK_OVF (1);
7493 NEW_ARGLOADA (cfg, ins, n);
7494 MONO_ADD_INS (cfg->cbb, ins);
7504 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7506 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7511 CHECK_STACK_OVF (1);
7514 EMIT_NEW_LOCLOAD (cfg, ins, n);
7518 case CEE_LDLOCA_S: {
7519 unsigned char *tmp_ip;
7521 CHECK_STACK_OVF (1);
7522 CHECK_LOCAL (ip [1]);
7524 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7530 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7539 CHECK_LOCAL (ip [1]);
7540 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7542 emit_stloc_ir (cfg, sp, header, ip [1]);
7547 CHECK_STACK_OVF (1);
7548 EMIT_NEW_PCONST (cfg, ins, NULL);
7549 ins->type = STACK_OBJ;
7554 CHECK_STACK_OVF (1);
7555 EMIT_NEW_ICONST (cfg, ins, -1);
7568 CHECK_STACK_OVF (1);
7569 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7575 CHECK_STACK_OVF (1);
7577 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7583 CHECK_STACK_OVF (1);
7584 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7590 CHECK_STACK_OVF (1);
7591 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7592 ins->type = STACK_I8;
7593 ins->dreg = alloc_dreg (cfg, STACK_I8);
7595 ins->inst_l = (gint64)read64 (ip);
7596 MONO_ADD_INS (bblock, ins);
7602 gboolean use_aotconst = FALSE;
7604 #ifdef TARGET_POWERPC
7605 /* FIXME: Clean this up */
7606 if (cfg->compile_aot)
7607 use_aotconst = TRUE;
7610 /* FIXME: we should really allocate this only late in the compilation process */
7611 f = mono_domain_alloc (cfg->domain, sizeof (float));
7613 CHECK_STACK_OVF (1);
7619 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7621 dreg = alloc_freg (cfg);
7622 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7623 ins->type = STACK_R8;
7625 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7626 ins->type = STACK_R8;
7627 ins->dreg = alloc_dreg (cfg, STACK_R8);
7629 MONO_ADD_INS (bblock, ins);
7639 gboolean use_aotconst = FALSE;
7641 #ifdef TARGET_POWERPC
7642 /* FIXME: Clean this up */
7643 if (cfg->compile_aot)
7644 use_aotconst = TRUE;
7647 /* FIXME: we should really allocate this only late in the compilation process */
7648 d = mono_domain_alloc (cfg->domain, sizeof (double));
7650 CHECK_STACK_OVF (1);
7656 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7658 dreg = alloc_freg (cfg);
7659 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7660 ins->type = STACK_R8;
7662 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7663 ins->type = STACK_R8;
7664 ins->dreg = alloc_dreg (cfg, STACK_R8);
7666 MONO_ADD_INS (bblock, ins);
7675 MonoInst *temp, *store;
7677 CHECK_STACK_OVF (1);
7681 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7682 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7684 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7687 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7700 if (sp [0]->type == STACK_R8)
7701 /* we need to pop the value from the x86 FP stack */
7702 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7708 INLINE_FAILURE ("jmp");
7709 GSHAREDVT_FAILURE (*ip);
7712 if (stack_start != sp)
7714 token = read32 (ip + 1);
7715 /* FIXME: check the signature matches */
7716 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7718 if (!cmethod || mono_loader_get_last_error ())
7721 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7722 GENERIC_SHARING_FAILURE (CEE_JMP);
7724 if (mono_security_cas_enabled ())
7725 CHECK_CFG_EXCEPTION;
7727 if (ARCH_HAVE_OP_TAIL_CALL) {
7728 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7731 /* Handle tail calls similarly to calls */
7732 n = fsig->param_count + fsig->hasthis;
7736 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7737 call->method = cmethod;
7738 call->tail_call = TRUE;
7739 call->signature = mono_method_signature (cmethod);
7740 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7741 call->inst.inst_p0 = cmethod;
7742 for (i = 0; i < n; ++i)
7743 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7745 mono_arch_emit_call (cfg, call);
7746 MONO_ADD_INS (bblock, (MonoInst*)call);
7748 for (i = 0; i < num_args; ++i)
7749 /* Prevent arguments from being optimized away */
7750 arg_array [i]->flags |= MONO_INST_VOLATILE;
7752 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7753 ins = (MonoInst*)call;
7754 ins->inst_p0 = cmethod;
7755 MONO_ADD_INS (bblock, ins);
7759 start_new_bblock = 1;
7764 case CEE_CALLVIRT: {
7765 MonoInst *addr = NULL;
7766 MonoMethodSignature *fsig = NULL;
7768 int virtual = *ip == CEE_CALLVIRT;
7769 int calli = *ip == CEE_CALLI;
7770 gboolean pass_imt_from_rgctx = FALSE;
7771 MonoInst *imt_arg = NULL;
7772 MonoInst *keep_this_alive = NULL;
7773 gboolean pass_vtable = FALSE;
7774 gboolean pass_mrgctx = FALSE;
7775 MonoInst *vtable_arg = NULL;
7776 gboolean check_this = FALSE;
7777 gboolean supported_tail_call = FALSE;
7778 gboolean tail_call = FALSE;
7779 gboolean need_seq_point = FALSE;
7780 guint32 call_opcode = *ip;
7781 gboolean emit_widen = TRUE;
7782 gboolean push_res = TRUE;
7783 gboolean skip_ret = FALSE;
7784 gboolean delegate_invoke = FALSE;
7787 token = read32 (ip + 1);
7792 //GSHAREDVT_FAILURE (*ip);
7797 fsig = mini_get_signature (method, token, generic_context);
7798 n = fsig->param_count + fsig->hasthis;
7800 if (method->dynamic && fsig->pinvoke) {
7804 * This is a call through a function pointer using a pinvoke
7805 * signature. Have to create a wrapper and call that instead.
7806 * FIXME: This is very slow, need to create a wrapper at JIT time
7807 * instead based on the signature.
7809 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7810 EMIT_NEW_PCONST (cfg, args [1], fsig);
7812 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7815 MonoMethod *cil_method;
7817 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7818 cil_method = cmethod;
7820 if (constrained_call) {
7821 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7822 if (cfg->verbose_level > 2)
7823 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7824 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7825 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7826 cfg->generic_sharing_context)) {
7827 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7830 if (cfg->verbose_level > 2)
7831 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7833 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7835 * This is needed since get_method_constrained can't find
7836 * the method in klass representing a type var.
7837 * The type var is guaranteed to be a reference type in this
7840 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7841 g_assert (!cmethod->klass->valuetype);
7843 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7848 if (!cmethod || mono_loader_get_last_error ())
7850 if (!dont_verify && !cfg->skip_visibility) {
7851 MonoMethod *target_method = cil_method;
7852 if (method->is_inflated) {
7853 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7855 if (!mono_method_can_access_method (method_definition, target_method) &&
7856 !mono_method_can_access_method (method, cil_method))
7857 METHOD_ACCESS_FAILURE;
7860 if (mono_security_core_clr_enabled ())
7861 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7863 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7864 /* MS.NET seems to silently convert this to a callvirt */
7869 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7870 * converts to a callvirt.
7872 * tests/bug-515884.il is an example of this behavior
7874 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7875 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7876 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7880 if (!cmethod->klass->inited)
7881 if (!mono_class_init (cmethod->klass))
7882 TYPE_LOAD_ERROR (cmethod->klass);
7884 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7885 mini_class_is_system_array (cmethod->klass)) {
7886 array_rank = cmethod->klass->rank;
7887 fsig = mono_method_signature (cmethod);
7889 fsig = mono_method_signature (cmethod);
7894 if (fsig->pinvoke) {
7895 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7896 check_for_pending_exc, cfg->compile_aot);
7897 fsig = mono_method_signature (wrapper);
7898 } else if (constrained_call) {
7899 fsig = mono_method_signature (cmethod);
7901 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7905 mono_save_token_info (cfg, image, token, cil_method);
7907 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7909 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7910 * foo (bar (), baz ())
7911 * works correctly. MS does this also:
7912 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7913 * The problem with this approach is that the debugger will stop after all calls returning a value,
7914 * even for simple cases, like:
7917 /* Special case a few common successor opcodes */
7918 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7919 need_seq_point = TRUE;
7922 n = fsig->param_count + fsig->hasthis;
7924 /* Don't support calls made using type arguments for now */
7926 if (cfg->gsharedvt) {
7927 if (mini_is_gsharedvt_signature (cfg, fsig))
7928 GSHAREDVT_FAILURE (*ip);
7932 if (mono_security_cas_enabled ()) {
7933 if (check_linkdemand (cfg, method, cmethod))
7934 INLINE_FAILURE ("linkdemand");
7935 CHECK_CFG_EXCEPTION;
7938 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7939 g_assert_not_reached ();
7942 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7945 if (!cfg->generic_sharing_context && cmethod)
7946 g_assert (!mono_method_check_context_used (cmethod));
7950 //g_assert (!virtual || fsig->hasthis);
7954 if (constrained_call) {
7955 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7957 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7959 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7960 /* The 'Own method' case below */
7961 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7962 /* 'The type parameter is instantiated as a reference type' case below. */
7963 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7964 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7965 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7966 MonoInst *args [16];
7969 * This case handles calls to
7970 * - object:ToString()/Equals()/GetHashCode(),
7971 * - System.IComparable<T>:CompareTo()
7972 * - System.IEquatable<T>:Equals ()
7973 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7977 if (mono_method_check_context_used (cmethod))
7978 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7980 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7981 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7983 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7984 if (fsig->hasthis && fsig->param_count) {
7985 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7986 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7987 ins->dreg = alloc_preg (cfg);
7988 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7989 MONO_ADD_INS (cfg->cbb, ins);
7992 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7995 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7997 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7998 addr_reg = ins->dreg;
7999 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8001 EMIT_NEW_ICONST (cfg, args [3], 0);
8002 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8005 EMIT_NEW_ICONST (cfg, args [3], 0);
8006 EMIT_NEW_ICONST (cfg, args [4], 0);
8008 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8011 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8012 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8013 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
8017 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8018 MONO_ADD_INS (cfg->cbb, add);
8020 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8021 MONO_ADD_INS (cfg->cbb, ins);
8022 /* ins represents the call result */
8027 GSHAREDVT_FAILURE (*ip);
8031 * We have the `constrained.' prefix opcode.
8033 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8035 * The type parameter is instantiated as a valuetype,
8036 * but that type doesn't override the method we're
8037 * calling, so we need to box `this'.
8039 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8040 ins->klass = constrained_call;
8041 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8042 CHECK_CFG_EXCEPTION;
8043 } else if (!constrained_call->valuetype) {
8044 int dreg = alloc_ireg_ref (cfg);
8047 * The type parameter is instantiated as a reference
8048 * type. We have a managed pointer on the stack, so
8049 * we need to dereference it here.
8051 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8052 ins->type = STACK_OBJ;
8055 if (cmethod->klass->valuetype) {
8058 /* Interface method */
8061 mono_class_setup_vtable (constrained_call);
8062 CHECK_TYPELOAD (constrained_call);
8063 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8065 TYPE_LOAD_ERROR (constrained_call);
8066 slot = mono_method_get_vtable_slot (cmethod);
8068 TYPE_LOAD_ERROR (cmethod->klass);
8069 cmethod = constrained_call->vtable [ioffset + slot];
8071 if (cmethod->klass == mono_defaults.enum_class) {
8072 /* Enum implements some interfaces, so treat this as the first case */
8073 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8074 ins->klass = constrained_call;
8075 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8076 CHECK_CFG_EXCEPTION;
8081 constrained_call = NULL;
8084 if (!calli && check_call_signature (cfg, fsig, sp))
8087 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8088 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8089 delegate_invoke = TRUE;
8092 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8094 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8095 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8103 * If the callee is a shared method, then its static cctor
8104 * might not get called after the call was patched.
8106 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8107 emit_generic_class_init (cfg, cmethod->klass);
8108 CHECK_TYPELOAD (cmethod->klass);
8112 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8114 if (cfg->generic_sharing_context && cmethod) {
8115 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8117 context_used = mini_method_check_context_used (cfg, cmethod);
8119 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8120 /* Generic method interface
8121 calls are resolved via a
8122 helper function and don't
8124 if (!cmethod_context || !cmethod_context->method_inst)
8125 pass_imt_from_rgctx = TRUE;
8129 * If a shared method calls another
8130 * shared method then the caller must
8131 * have a generic sharing context
8132 * because the magic trampoline
8133 * requires it. FIXME: We shouldn't
8134 * have to force the vtable/mrgctx
8135 * variable here. Instead there
8136 * should be a flag in the cfg to
8137 * request a generic sharing context.
8140 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8141 mono_get_vtable_var (cfg);
8146 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8148 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8150 CHECK_TYPELOAD (cmethod->klass);
8151 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8156 g_assert (!vtable_arg);
8158 if (!cfg->compile_aot) {
8160 * emit_get_rgctx_method () calls mono_class_vtable () so check
8161 * for type load errors before.
8163 mono_class_setup_vtable (cmethod->klass);
8164 CHECK_TYPELOAD (cmethod->klass);
8167 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8169 /* !marshalbyref is needed to properly handle generic methods + remoting */
8170 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8171 MONO_METHOD_IS_FINAL (cmethod)) &&
8172 !mono_class_is_marshalbyref (cmethod->klass)) {
8179 if (pass_imt_from_rgctx) {
8180 g_assert (!pass_vtable);
8183 imt_arg = emit_get_rgctx_method (cfg, context_used,
8184 cmethod, MONO_RGCTX_INFO_METHOD);
8188 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8190 /* Calling virtual generic methods */
8191 if (cmethod && virtual &&
8192 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8193 !(MONO_METHOD_IS_FINAL (cmethod) &&
8194 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8195 fsig->generic_param_count &&
8196 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8197 MonoInst *this_temp, *this_arg_temp, *store;
8198 MonoInst *iargs [4];
8199 gboolean use_imt = FALSE;
8201 g_assert (fsig->is_inflated);
8203 /* Prevent inlining of methods that contain indirect calls */
8204 INLINE_FAILURE ("virtual generic call");
8206 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8207 GSHAREDVT_FAILURE (*ip);
8209 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8210 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8215 g_assert (!imt_arg);
8217 g_assert (cmethod->is_inflated);
8218 imt_arg = emit_get_rgctx_method (cfg, context_used,
8219 cmethod, MONO_RGCTX_INFO_METHOD);
8220 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8222 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8223 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8224 MONO_ADD_INS (bblock, store);
8226 /* FIXME: This should be a managed pointer */
8227 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8229 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8230 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8231 cmethod, MONO_RGCTX_INFO_METHOD);
8232 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8233 addr = mono_emit_jit_icall (cfg,
8234 mono_helper_compile_generic_method, iargs);
8236 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8238 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8245 * Implement a workaround for the inherent races involved in locking:
8251 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8252 * try block, the Exit () won't be executed, see:
8253 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8254 * To work around this, we extend such try blocks to include the last x bytes
8255 * of the Monitor.Enter () call.
8257 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8258 MonoBasicBlock *tbb;
8260 GET_BBLOCK (cfg, tbb, ip + 5);
8262 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8263 * from Monitor.Enter like ArgumentNullException.
8265 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8266 /* Mark this bblock as needing to be extended */
8267 tbb->extend_try_block = TRUE;
8271 /* Conversion to a JIT intrinsic */
8272 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8274 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8275 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8282 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8283 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8284 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8285 !g_list_find (dont_inline, cmethod)) {
8287 gboolean always = FALSE;
8289 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8290 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8291 /* Prevent inlining of methods that call wrappers */
8292 INLINE_FAILURE ("wrapper call");
8293 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8297 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8299 cfg->real_offset += 5;
8302 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8303 /* *sp is already set by inline_method */
8308 inline_costs += costs;
8314 /* Tail recursion elimination */
8315 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8316 gboolean has_vtargs = FALSE;
8319 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8320 INLINE_FAILURE ("tail call");
8322 /* keep it simple */
8323 for (i = fsig->param_count - 1; i >= 0; i--) {
8324 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8329 for (i = 0; i < n; ++i)
8330 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8331 MONO_INST_NEW (cfg, ins, OP_BR);
8332 MONO_ADD_INS (bblock, ins);
8333 tblock = start_bblock->out_bb [0];
8334 link_bblock (cfg, bblock, tblock);
8335 ins->inst_target_bb = tblock;
8336 start_new_bblock = 1;
8338 /* skip the CEE_RET, too */
8339 if (ip_in_bb (cfg, bblock, ip + 5))
8346 inline_costs += 10 * num_calls++;
8349 * Making generic calls out of gsharedvt methods.
8351 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8352 MonoRgctxInfoType info_type;
8355 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8356 //GSHAREDVT_FAILURE (*ip);
8357 // disable for possible remoting calls
8358 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8359 GSHAREDVT_FAILURE (*ip);
8360 if (fsig->generic_param_count) {
8361 /* virtual generic call */
8362 g_assert (mono_use_imt);
8363 g_assert (!imt_arg);
8364 /* Same as the virtual generic case above */
8365 imt_arg = emit_get_rgctx_method (cfg, context_used,
8366 cmethod, MONO_RGCTX_INFO_METHOD);
8367 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8372 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8373 /* test_0_multi_dim_arrays () in gshared.cs */
8374 GSHAREDVT_FAILURE (*ip);
8376 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8377 keep_this_alive = sp [0];
8379 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8380 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8382 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8383 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8385 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8387 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8389 * We pass the address to the gsharedvt trampoline in the rgctx reg
8391 MonoInst *callee = addr;
8393 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8395 GSHAREDVT_FAILURE (*ip);
8397 addr = emit_get_rgctx_sig (cfg, context_used,
8398 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8399 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8403 /* Generic sharing */
8404 /* FIXME: only do this for generic methods if
8405 they are not shared! */
8406 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8407 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8408 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8409 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8410 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8411 INLINE_FAILURE ("gshared");
8413 g_assert (cfg->generic_sharing_context && cmethod);
8417 * We are compiling a call to a
8418 * generic method from shared code,
8419 * which means that we have to look up
8420 * the method in the rgctx and do an
8424 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8426 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8427 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8431 /* Indirect calls */
8433 if (call_opcode == CEE_CALL)
8434 g_assert (context_used);
8435 else if (call_opcode == CEE_CALLI)
8436 g_assert (!vtable_arg);
8438 /* FIXME: what the hell is this??? */
8439 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8440 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8442 /* Prevent inlining of methods with indirect calls */
8443 INLINE_FAILURE ("indirect call");
8445 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8450 * Instead of emitting an indirect call, emit a direct call
8451 * with the contents of the aotconst as the patch info.
8453 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8454 info_type = addr->inst_c1;
8455 info_data = addr->inst_p0;
8457 info_type = addr->inst_right->inst_c1;
8458 info_data = addr->inst_right->inst_left;
8461 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8462 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8467 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8475 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8476 MonoInst *val = sp [fsig->param_count];
8478 if (val->type == STACK_OBJ) {
8479 MonoInst *iargs [2];
8484 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8487 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8488 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8489 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8490 emit_write_barrier (cfg, addr, val);
8491 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8492 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8494 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8495 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8496 if (!cmethod->klass->element_class->valuetype && !readonly)
8497 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8498 CHECK_TYPELOAD (cmethod->klass);
8501 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8504 g_assert_not_reached ();
8511 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8515 /* Tail prefix / tail call optimization */
8517 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8518 /* FIXME: runtime generic context pointer for jumps? */
8519 /* FIXME: handle this for generic sharing eventually */
8520 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8521 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8522 supported_tail_call = TRUE;
8524 if (supported_tail_call) {
8527 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8528 INLINE_FAILURE ("tail call");
8530 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8532 if (ARCH_HAVE_OP_TAIL_CALL) {
8533 /* Handle tail calls similarly to normal calls */
8536 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8537 call->tail_call = TRUE;
8538 call->method = cmethod;
8539 call->signature = mono_method_signature (cmethod);
8542 * We implement tail calls by storing the actual arguments into the
8543 * argument variables, then emitting a CEE_JMP.
8545 for (i = 0; i < n; ++i) {
8546 /* Prevent argument from being register allocated */
8547 arg_array [i]->flags |= MONO_INST_VOLATILE;
8548 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8550 ins = (MonoInst*)call;
8551 ins->inst_p0 = cmethod;
8552 ins->inst_p1 = arg_array [0];
8553 MONO_ADD_INS (bblock, ins);
8554 link_bblock (cfg, bblock, end_bblock);
8555 start_new_bblock = 1;
8557 // FIXME: Eliminate unreachable epilogs
8560 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8561 * only reachable from this call.
8563 GET_BBLOCK (cfg, tblock, ip + 5);
8564 if (tblock == bblock || tblock->in_count == 0)
8573 * Synchronized wrappers.
8574 * It's hard to determine where to replace a method with its synchronized
8575 * wrapper without causing an infinite recursion. The current solution is
8576 * to add the synchronized wrapper in the trampolines, and to
8577 * change the called method to a dummy wrapper, and resolve that wrapper
8578 * to the real method in mono_jit_compile_method ().
8580 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8581 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8582 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8583 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8587 INLINE_FAILURE ("call");
8588 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8589 imt_arg, vtable_arg);
8592 link_bblock (cfg, bblock, end_bblock);
8593 start_new_bblock = 1;
8595 // FIXME: Eliminate unreachable epilogs
8598 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8599 * only reachable from this call.
8601 GET_BBLOCK (cfg, tblock, ip + 5);
8602 if (tblock == bblock || tblock->in_count == 0)
8609 /* End of call, INS should contain the result of the call, if any */
8611 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8614 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8619 if (keep_this_alive) {
8620 MonoInst *dummy_use;
8622 /* See mono_emit_method_call_full () */
8623 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8626 CHECK_CFG_EXCEPTION;
8630 g_assert (*ip == CEE_RET);
8634 constrained_call = NULL;
8636 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8640 if (cfg->method != method) {
8641 /* return from inlined method */
8643 * If in_count == 0, that means the ret is unreachable due to
8644 * being preceded by a throw. In that case, inline_method () will
8645 * handle setting the return value
8646 * (test case: test_0_inline_throw ()).
8648 if (return_var && cfg->cbb->in_count) {
8649 MonoType *ret_type = mono_method_signature (method)->ret;
8655 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8658 //g_assert (returnvar != -1);
8659 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8660 cfg->ret_var_set = TRUE;
8663 if (cfg->lmf_var && cfg->cbb->in_count)
8667 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8669 if (seq_points && !sym_seq_points) {
8671 * Place a seq point here too even though the IL stack is not
8672 * empty, so a step over on
8675 * will work correctly.
8677 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8678 MONO_ADD_INS (cfg->cbb, ins);
8681 g_assert (!return_var);
8685 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8688 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8691 if (!cfg->vret_addr) {
8694 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8696 EMIT_NEW_RETLOADA (cfg, ret_addr);
8698 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8699 ins->klass = mono_class_from_mono_type (ret_type);
8702 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8703 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8704 MonoInst *iargs [1];
8708 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8709 mono_arch_emit_setret (cfg, method, conv);
8711 mono_arch_emit_setret (cfg, method, *sp);
8714 mono_arch_emit_setret (cfg, method, *sp);
8719 if (sp != stack_start)
8721 MONO_INST_NEW (cfg, ins, OP_BR);
8723 ins->inst_target_bb = end_bblock;
8724 MONO_ADD_INS (bblock, ins);
8725 link_bblock (cfg, bblock, end_bblock);
8726 start_new_bblock = 1;
8730 MONO_INST_NEW (cfg, ins, OP_BR);
8732 target = ip + 1 + (signed char)(*ip);
8734 GET_BBLOCK (cfg, tblock, target);
8735 link_bblock (cfg, bblock, tblock);
8736 ins->inst_target_bb = tblock;
8737 if (sp != stack_start) {
8738 handle_stack_args (cfg, stack_start, sp - stack_start);
8740 CHECK_UNVERIFIABLE (cfg);
8742 MONO_ADD_INS (bblock, ins);
8743 start_new_bblock = 1;
8744 inline_costs += BRANCH_COST;
8758 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8760 target = ip + 1 + *(signed char*)ip;
8766 inline_costs += BRANCH_COST;
8770 MONO_INST_NEW (cfg, ins, OP_BR);
8773 target = ip + 4 + (gint32)read32(ip);
8775 GET_BBLOCK (cfg, tblock, target);
8776 link_bblock (cfg, bblock, tblock);
8777 ins->inst_target_bb = tblock;
8778 if (sp != stack_start) {
8779 handle_stack_args (cfg, stack_start, sp - stack_start);
8781 CHECK_UNVERIFIABLE (cfg);
8784 MONO_ADD_INS (bblock, ins);
8786 start_new_bblock = 1;
8787 inline_costs += BRANCH_COST;
8794 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8795 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8796 guint32 opsize = is_short ? 1 : 4;
8798 CHECK_OPSIZE (opsize);
8800 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8803 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8808 GET_BBLOCK (cfg, tblock, target);
8809 link_bblock (cfg, bblock, tblock);
8810 GET_BBLOCK (cfg, tblock, ip);
8811 link_bblock (cfg, bblock, tblock);
8813 if (sp != stack_start) {
8814 handle_stack_args (cfg, stack_start, sp - stack_start);
8815 CHECK_UNVERIFIABLE (cfg);
8818 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8819 cmp->sreg1 = sp [0]->dreg;
8820 type_from_op (cmp, sp [0], NULL);
8823 #if SIZEOF_REGISTER == 4
8824 if (cmp->opcode == OP_LCOMPARE_IMM) {
8825 /* Convert it to OP_LCOMPARE */
8826 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8827 ins->type = STACK_I8;
8828 ins->dreg = alloc_dreg (cfg, STACK_I8);
8830 MONO_ADD_INS (bblock, ins);
8831 cmp->opcode = OP_LCOMPARE;
8832 cmp->sreg2 = ins->dreg;
8835 MONO_ADD_INS (bblock, cmp);
8837 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8838 type_from_op (ins, sp [0], NULL);
8839 MONO_ADD_INS (bblock, ins);
8840 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8841 GET_BBLOCK (cfg, tblock, target);
8842 ins->inst_true_bb = tblock;
8843 GET_BBLOCK (cfg, tblock, ip);
8844 ins->inst_false_bb = tblock;
8845 start_new_bblock = 2;
8848 inline_costs += BRANCH_COST;
8863 MONO_INST_NEW (cfg, ins, *ip);
8865 target = ip + 4 + (gint32)read32(ip);
8871 inline_costs += BRANCH_COST;
8875 MonoBasicBlock **targets;
8876 MonoBasicBlock *default_bblock;
8877 MonoJumpInfoBBTable *table;
8878 int offset_reg = alloc_preg (cfg);
8879 int target_reg = alloc_preg (cfg);
8880 int table_reg = alloc_preg (cfg);
8881 int sum_reg = alloc_preg (cfg);
8882 gboolean use_op_switch;
8886 n = read32 (ip + 1);
8889 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8893 CHECK_OPSIZE (n * sizeof (guint32));
8894 target = ip + n * sizeof (guint32);
8896 GET_BBLOCK (cfg, default_bblock, target);
8897 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8899 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8900 for (i = 0; i < n; ++i) {
8901 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8902 targets [i] = tblock;
8903 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8907 if (sp != stack_start) {
8909 * Link the current bb with the targets as well, so handle_stack_args
8910 * will set their in_stack correctly.
8912 link_bblock (cfg, bblock, default_bblock);
8913 for (i = 0; i < n; ++i)
8914 link_bblock (cfg, bblock, targets [i]);
8916 handle_stack_args (cfg, stack_start, sp - stack_start);
8918 CHECK_UNVERIFIABLE (cfg);
8921 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8922 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8925 for (i = 0; i < n; ++i)
8926 link_bblock (cfg, bblock, targets [i]);
8928 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8929 table->table = targets;
8930 table->table_size = n;
8932 use_op_switch = FALSE;
8934 /* ARM implements SWITCH statements differently */
8935 /* FIXME: Make it use the generic implementation */
8936 if (!cfg->compile_aot)
8937 use_op_switch = TRUE;
8940 if (COMPILE_LLVM (cfg))
8941 use_op_switch = TRUE;
8943 cfg->cbb->has_jump_table = 1;
8945 if (use_op_switch) {
8946 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8947 ins->sreg1 = src1->dreg;
8948 ins->inst_p0 = table;
8949 ins->inst_many_bb = targets;
8950 ins->klass = GUINT_TO_POINTER (n);
8951 MONO_ADD_INS (cfg->cbb, ins);
8953 if (sizeof (gpointer) == 8)
8954 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8956 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8958 #if SIZEOF_REGISTER == 8
8959 /* The upper word might not be zero, and we add it to a 64 bit address later */
8960 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8963 if (cfg->compile_aot) {
8964 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8966 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8967 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8968 ins->inst_p0 = table;
8969 ins->dreg = table_reg;
8970 MONO_ADD_INS (cfg->cbb, ins);
8973 /* FIXME: Use load_memindex */
8974 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8975 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8976 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8978 start_new_bblock = 1;
8979 inline_costs += (BRANCH_COST * 2);
8999 dreg = alloc_freg (cfg);
9002 dreg = alloc_lreg (cfg);
9005 dreg = alloc_ireg_ref (cfg);
9008 dreg = alloc_preg (cfg);
9011 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9012 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9013 ins->flags |= ins_flag;
9015 MONO_ADD_INS (bblock, ins);
9017 if (ins->flags & MONO_INST_VOLATILE) {
9018 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9019 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9020 emit_memory_barrier (cfg, FullBarrier);
9035 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9036 ins->flags |= ins_flag;
9039 if (ins->flags & MONO_INST_VOLATILE) {
9040 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9041 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
9042 emit_memory_barrier (cfg, FullBarrier);
9045 MONO_ADD_INS (bblock, ins);
9047 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9048 emit_write_barrier (cfg, sp [0], sp [1]);
9057 MONO_INST_NEW (cfg, ins, (*ip));
9059 ins->sreg1 = sp [0]->dreg;
9060 ins->sreg2 = sp [1]->dreg;
9061 type_from_op (ins, sp [0], sp [1]);
9063 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9065 /* Use the immediate opcodes if possible */
9066 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9067 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9068 if (imm_opcode != -1) {
9069 ins->opcode = imm_opcode;
9070 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9073 sp [1]->opcode = OP_NOP;
9077 MONO_ADD_INS ((cfg)->cbb, (ins));
9079 *sp++ = mono_decompose_opcode (cfg, ins);
9096 MONO_INST_NEW (cfg, ins, (*ip));
9098 ins->sreg1 = sp [0]->dreg;
9099 ins->sreg2 = sp [1]->dreg;
9100 type_from_op (ins, sp [0], sp [1]);
9102 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9103 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9105 /* FIXME: Pass opcode to is_inst_imm */
9107 /* Use the immediate opcodes if possible */
9108 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9111 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9112 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9113 /* Keep emulated opcodes which are optimized away later */
9114 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9115 imm_opcode = mono_op_to_op_imm (ins->opcode);
9118 if (imm_opcode != -1) {
9119 ins->opcode = imm_opcode;
9120 if (sp [1]->opcode == OP_I8CONST) {
9121 #if SIZEOF_REGISTER == 8
9122 ins->inst_imm = sp [1]->inst_l;
9124 ins->inst_ls_word = sp [1]->inst_ls_word;
9125 ins->inst_ms_word = sp [1]->inst_ms_word;
9129 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9132 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9133 if (sp [1]->next == NULL)
9134 sp [1]->opcode = OP_NOP;
9137 MONO_ADD_INS ((cfg)->cbb, (ins));
9139 *sp++ = mono_decompose_opcode (cfg, ins);
9152 case CEE_CONV_OVF_I8:
9153 case CEE_CONV_OVF_U8:
9157 /* Special case this earlier so we have long constants in the IR */
9158 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9159 int data = sp [-1]->inst_c0;
9160 sp [-1]->opcode = OP_I8CONST;
9161 sp [-1]->type = STACK_I8;
9162 #if SIZEOF_REGISTER == 8
9163 if ((*ip) == CEE_CONV_U8)
9164 sp [-1]->inst_c0 = (guint32)data;
9166 sp [-1]->inst_c0 = data;
9168 sp [-1]->inst_ls_word = data;
9169 if ((*ip) == CEE_CONV_U8)
9170 sp [-1]->inst_ms_word = 0;
9172 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9174 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9181 case CEE_CONV_OVF_I4:
9182 case CEE_CONV_OVF_I1:
9183 case CEE_CONV_OVF_I2:
9184 case CEE_CONV_OVF_I:
9185 case CEE_CONV_OVF_U:
9188 if (sp [-1]->type == STACK_R8) {
9189 ADD_UNOP (CEE_CONV_OVF_I8);
9196 case CEE_CONV_OVF_U1:
9197 case CEE_CONV_OVF_U2:
9198 case CEE_CONV_OVF_U4:
9201 if (sp [-1]->type == STACK_R8) {
9202 ADD_UNOP (CEE_CONV_OVF_U8);
9209 case CEE_CONV_OVF_I1_UN:
9210 case CEE_CONV_OVF_I2_UN:
9211 case CEE_CONV_OVF_I4_UN:
9212 case CEE_CONV_OVF_I8_UN:
9213 case CEE_CONV_OVF_U1_UN:
9214 case CEE_CONV_OVF_U2_UN:
9215 case CEE_CONV_OVF_U4_UN:
9216 case CEE_CONV_OVF_U8_UN:
9217 case CEE_CONV_OVF_I_UN:
9218 case CEE_CONV_OVF_U_UN:
9225 CHECK_CFG_EXCEPTION;
9229 case CEE_ADD_OVF_UN:
9231 case CEE_MUL_OVF_UN:
9233 case CEE_SUB_OVF_UN:
9239 GSHAREDVT_FAILURE (*ip);
9242 token = read32 (ip + 1);
9243 klass = mini_get_class (method, token, generic_context);
9244 CHECK_TYPELOAD (klass);
9246 if (generic_class_is_reference_type (cfg, klass)) {
9247 MonoInst *store, *load;
9248 int dreg = alloc_ireg_ref (cfg);
9250 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9251 load->flags |= ins_flag;
9252 MONO_ADD_INS (cfg->cbb, load);
9254 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9255 store->flags |= ins_flag;
9256 MONO_ADD_INS (cfg->cbb, store);
9258 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9259 emit_write_barrier (cfg, sp [0], sp [1]);
9261 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9273 token = read32 (ip + 1);
9274 klass = mini_get_class (method, token, generic_context);
9275 CHECK_TYPELOAD (klass);
9277 /* Optimize the common ldobj+stloc combination */
9287 loc_index = ip [5] - CEE_STLOC_0;
9294 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9295 CHECK_LOCAL (loc_index);
9297 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9298 ins->dreg = cfg->locals [loc_index]->dreg;
9304 /* Optimize the ldobj+stobj combination */
9305 /* The reference case ends up being a load+store anyway */
9306 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9311 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9318 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9327 CHECK_STACK_OVF (1);
9329 n = read32 (ip + 1);
9331 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9332 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9333 ins->type = STACK_OBJ;
9336 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9337 MonoInst *iargs [1];
9339 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9340 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9342 if (cfg->opt & MONO_OPT_SHARED) {
9343 MonoInst *iargs [3];
9345 if (cfg->compile_aot) {
9346 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9348 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9349 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9350 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9351 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9352 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9354 if (bblock->out_of_line) {
9355 MonoInst *iargs [2];
9357 if (image == mono_defaults.corlib) {
9359 * Avoid relocations in AOT and save some space by using a
9360 * version of helper_ldstr specialized to mscorlib.
9362 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9363 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9365 /* Avoid creating the string object */
9366 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9367 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9368 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9372 if (cfg->compile_aot) {
9373 NEW_LDSTRCONST (cfg, ins, image, n);
9375 MONO_ADD_INS (bblock, ins);
9378 NEW_PCONST (cfg, ins, NULL);
9379 ins->type = STACK_OBJ;
9380 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9382 OUT_OF_MEMORY_FAILURE;
9385 MONO_ADD_INS (bblock, ins);
9394 MonoInst *iargs [2];
9395 MonoMethodSignature *fsig;
9398 MonoInst *vtable_arg = NULL;
9401 token = read32 (ip + 1);
9402 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9403 if (!cmethod || mono_loader_get_last_error ())
9405 fsig = mono_method_get_signature (cmethod, image, token);
9409 mono_save_token_info (cfg, image, token, cmethod);
9411 if (!mono_class_init (cmethod->klass))
9412 TYPE_LOAD_ERROR (cmethod->klass);
9414 context_used = mini_method_check_context_used (cfg, cmethod);
9416 if (mono_security_cas_enabled ()) {
9417 if (check_linkdemand (cfg, method, cmethod))
9418 INLINE_FAILURE ("linkdemand");
9419 CHECK_CFG_EXCEPTION;
9420 } else if (mono_security_core_clr_enabled ()) {
9421 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9424 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9425 emit_generic_class_init (cfg, cmethod->klass);
9426 CHECK_TYPELOAD (cmethod->klass);
9430 if (cfg->gsharedvt) {
9431 if (mini_is_gsharedvt_variable_signature (sig))
9432 GSHAREDVT_FAILURE (*ip);
9436 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9437 mono_method_is_generic_sharable (cmethod, TRUE)) {
9438 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9439 mono_class_vtable (cfg->domain, cmethod->klass);
9440 CHECK_TYPELOAD (cmethod->klass);
9442 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9443 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9446 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9447 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9449 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9451 CHECK_TYPELOAD (cmethod->klass);
9452 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9457 n = fsig->param_count;
9461 * Generate smaller code for the common newobj <exception> instruction in
9462 * argument checking code.
9464 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9465 is_exception_class (cmethod->klass) && n <= 2 &&
9466 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9467 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9468 MonoInst *iargs [3];
9470 g_assert (!vtable_arg);
9474 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9477 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9481 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9486 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9489 g_assert_not_reached ();
9497 /* move the args to allow room for 'this' in the first position */
9503 /* check_call_signature () requires sp[0] to be set */
9504 this_ins.type = STACK_OBJ;
9506 if (check_call_signature (cfg, fsig, sp))
9511 if (mini_class_is_system_array (cmethod->klass)) {
9512 g_assert (!vtable_arg);
9514 *sp = emit_get_rgctx_method (cfg, context_used,
9515 cmethod, MONO_RGCTX_INFO_METHOD);
9517 /* Avoid varargs in the common case */
9518 if (fsig->param_count == 1)
9519 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9520 else if (fsig->param_count == 2)
9521 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9522 else if (fsig->param_count == 3)
9523 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9524 else if (fsig->param_count == 4)
9525 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9527 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9528 } else if (cmethod->string_ctor) {
9529 g_assert (!context_used);
9530 g_assert (!vtable_arg);
9531 /* we simply pass a null pointer */
9532 EMIT_NEW_PCONST (cfg, *sp, NULL);
9533 /* now call the string ctor */
9534 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9536 MonoInst* callvirt_this_arg = NULL;
9538 if (cmethod->klass->valuetype) {
9539 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9540 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9541 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9546 * The code generated by mini_emit_virtual_call () expects
9547 * iargs [0] to be a boxed instance, but luckily the vcall
9548 * will be transformed into a normal call there.
9550 } else if (context_used) {
9551 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9554 MonoVTable *vtable = NULL;
9556 if (!cfg->compile_aot)
9557 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9558 CHECK_TYPELOAD (cmethod->klass);
9561 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9562 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9563 * As a workaround, we call class cctors before allocating objects.
9565 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9566 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9567 if (cfg->verbose_level > 2)
9568 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9569 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9572 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9575 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9578 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9580 /* Now call the actual ctor */
9581 /* Avoid virtual calls to ctors if possible */
9582 if (mono_class_is_marshalbyref (cmethod->klass))
9583 callvirt_this_arg = sp [0];
9586 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9587 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9588 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9593 CHECK_CFG_EXCEPTION;
9594 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9595 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9596 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9597 !g_list_find (dont_inline, cmethod)) {
9600 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9601 cfg->real_offset += 5;
9604 inline_costs += costs - 5;
9606 INLINE_FAILURE ("inline failure");
9607 // FIXME-VT: Clean this up
9608 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9609 GSHAREDVT_FAILURE(*ip);
9610 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9612 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9615 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9616 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9617 } else if (context_used &&
9618 ((!mono_method_is_generic_sharable (cmethod, TRUE) ||
9619 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
9620 MonoInst *cmethod_addr;
9622 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
9624 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9625 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9627 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9629 INLINE_FAILURE ("ctor call");
9630 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9631 callvirt_this_arg, NULL, vtable_arg);
9635 if (alloc == NULL) {
9637 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9638 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9652 token = read32 (ip + 1);
9653 klass = mini_get_class (method, token, generic_context);
9654 CHECK_TYPELOAD (klass);
9655 if (sp [0]->type != STACK_OBJ)
9658 context_used = mini_class_check_context_used (cfg, klass);
9660 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9667 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9670 if (cfg->compile_aot)
9671 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9673 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9675 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9677 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9680 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9681 MonoMethod *mono_castclass;
9682 MonoInst *iargs [1];
9685 mono_castclass = mono_marshal_get_castclass (klass);
9688 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9689 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9690 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9691 reset_cast_details (cfg);
9692 CHECK_CFG_EXCEPTION;
9693 g_assert (costs > 0);
9696 cfg->real_offset += 5;
9701 inline_costs += costs;
9704 ins = handle_castclass (cfg, klass, *sp, context_used);
9705 CHECK_CFG_EXCEPTION;
9715 token = read32 (ip + 1);
9716 klass = mini_get_class (method, token, generic_context);
9717 CHECK_TYPELOAD (klass);
9718 if (sp [0]->type != STACK_OBJ)
9721 context_used = mini_class_check_context_used (cfg, klass);
9723 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9724 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9731 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9734 if (cfg->compile_aot)
9735 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9737 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9739 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9742 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9743 MonoMethod *mono_isinst;
9744 MonoInst *iargs [1];
9747 mono_isinst = mono_marshal_get_isinst (klass);
9750 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9751 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9752 CHECK_CFG_EXCEPTION;
9753 g_assert (costs > 0);
9756 cfg->real_offset += 5;
9761 inline_costs += costs;
9764 ins = handle_isinst (cfg, klass, *sp, context_used);
9765 CHECK_CFG_EXCEPTION;
9772 case CEE_UNBOX_ANY: {
9776 token = read32 (ip + 1);
9777 klass = mini_get_class (method, token, generic_context);
9778 CHECK_TYPELOAD (klass);
9780 mono_save_token_info (cfg, image, token, klass);
9782 context_used = mini_class_check_context_used (cfg, klass);
9784 if (mini_is_gsharedvt_klass (cfg, klass)) {
9785 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9793 if (generic_class_is_reference_type (cfg, klass)) {
9794 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9795 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9802 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9805 /*FIXME AOT support*/
9806 if (cfg->compile_aot)
9807 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9809 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9811 /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
9812 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9815 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9816 MonoMethod *mono_castclass;
9817 MonoInst *iargs [1];
9820 mono_castclass = mono_marshal_get_castclass (klass);
9823 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9824 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9825 CHECK_CFG_EXCEPTION;
9826 g_assert (costs > 0);
9829 cfg->real_offset += 5;
9833 inline_costs += costs;
9835 ins = handle_castclass (cfg, klass, *sp, context_used);
9836 CHECK_CFG_EXCEPTION;
9844 if (mono_class_is_nullable (klass)) {
9845 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9852 ins = handle_unbox (cfg, klass, sp, context_used);
9858 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9871 token = read32 (ip + 1);
9872 klass = mini_get_class (method, token, generic_context);
9873 CHECK_TYPELOAD (klass);
9875 mono_save_token_info (cfg, image, token, klass);
9877 context_used = mini_class_check_context_used (cfg, klass);
9879 if (generic_class_is_reference_type (cfg, klass)) {
9885 if (klass == mono_defaults.void_class)
9887 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9889 /* frequent check in generic code: box (struct), brtrue */
9891 // FIXME: LLVM can't handle the inconsistent bb linking
9892 if (!mono_class_is_nullable (klass) &&
9893 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9894 (ip [5] == CEE_BRTRUE ||
9895 ip [5] == CEE_BRTRUE_S ||
9896 ip [5] == CEE_BRFALSE ||
9897 ip [5] == CEE_BRFALSE_S)) {
9898 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9900 MonoBasicBlock *true_bb, *false_bb;
9904 if (cfg->verbose_level > 3) {
9905 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9906 printf ("<box+brtrue opt>\n");
9914 target = ip + 1 + (signed char)(*ip);
9921 target = ip + 4 + (gint)(read32 (ip));
9925 g_assert_not_reached ();
9929 * We need to link both bblocks, since it is needed for handling stack
9930 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9931 * Branching to only one of them would lead to inconsistencies, so
9932 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9934 GET_BBLOCK (cfg, true_bb, target);
9935 GET_BBLOCK (cfg, false_bb, ip);
9937 mono_link_bblock (cfg, cfg->cbb, true_bb);
9938 mono_link_bblock (cfg, cfg->cbb, false_bb);
9940 if (sp != stack_start) {
9941 handle_stack_args (cfg, stack_start, sp - stack_start);
9943 CHECK_UNVERIFIABLE (cfg);
9946 if (COMPILE_LLVM (cfg)) {
9947 dreg = alloc_ireg (cfg);
9948 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9949 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9951 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9953 /* The JIT can't eliminate the iconst+compare */
9954 MONO_INST_NEW (cfg, ins, OP_BR);
9955 ins->inst_target_bb = is_true ? true_bb : false_bb;
9956 MONO_ADD_INS (cfg->cbb, ins);
9959 start_new_bblock = 1;
9963 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9965 CHECK_CFG_EXCEPTION;
9974 token = read32 (ip + 1);
9975 klass = mini_get_class (method, token, generic_context);
9976 CHECK_TYPELOAD (klass);
9978 mono_save_token_info (cfg, image, token, klass);
9980 context_used = mini_class_check_context_used (cfg, klass);
9982 if (mono_class_is_nullable (klass)) {
9985 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9986 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9990 ins = handle_unbox (cfg, klass, sp, context_used);
10003 MonoClassField *field;
10004 #ifndef DISABLE_REMOTING
10008 gboolean is_instance;
10010 gpointer addr = NULL;
10011 gboolean is_special_static;
10013 MonoInst *store_val = NULL;
10014 MonoInst *thread_ins;
10017 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10019 if (op == CEE_STFLD) {
10022 store_val = sp [1];
10027 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10029 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10032 if (op == CEE_STSFLD) {
10035 store_val = sp [0];
10040 token = read32 (ip + 1);
10041 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10042 field = mono_method_get_wrapper_data (method, token);
10043 klass = field->parent;
10046 field = mono_field_from_token (image, token, &klass, generic_context);
10050 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10051 FIELD_ACCESS_FAILURE;
10052 mono_class_init (klass);
10054 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10057 /* if the class is Critical then transparent code cannot access its fields */
10058 if (!is_instance && mono_security_core_clr_enabled ())
10059 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10061 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10062 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10063 if (mono_security_core_clr_enabled ())
10064 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10068 * LDFLD etc. is usable on static fields as well, so convert those cases to
10071 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10083 g_assert_not_reached ();
10085 is_instance = FALSE;
10088 context_used = mini_class_check_context_used (cfg, klass);
10090 /* INSTANCE CASE */
10092 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10093 if (op == CEE_STFLD) {
10094 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10096 #ifndef DISABLE_REMOTING
10097 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10098 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10099 MonoInst *iargs [5];
10101 GSHAREDVT_FAILURE (op);
10103 iargs [0] = sp [0];
10104 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10105 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10106 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10108 iargs [4] = sp [1];
10110 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10111 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10112 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10113 CHECK_CFG_EXCEPTION;
10114 g_assert (costs > 0);
10116 cfg->real_offset += 5;
10119 inline_costs += costs;
10121 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10128 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10130 if (mini_is_gsharedvt_klass (cfg, klass)) {
10131 MonoInst *offset_ins;
10133 context_used = mini_class_check_context_used (cfg, klass);
10135 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10136 dreg = alloc_ireg_mp (cfg);
10137 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10138 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10139 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10141 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10143 if (sp [0]->opcode != OP_LDADDR)
10144 store->flags |= MONO_INST_FAULT;
10146 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10147 /* insert call to write barrier */
10151 dreg = alloc_ireg_mp (cfg);
10152 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10153 emit_write_barrier (cfg, ptr, sp [1]);
10156 store->flags |= ins_flag;
10163 #ifndef DISABLE_REMOTING
10164 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
/*
 * Tail of the CEE_LDFLD/CEE_LDFLDA/CEE_STFLD instance-field handler (elided context above).
 * First path: marshal-by-ref style access through a generated wrapper method, which is
 * either inlined (when MONO_OPT_INLINE or AOT) or emitted as a plain call.
 * NOTE(review): surrounding if/else braces are elided in this extract — control flow
 * between the wrapper path and the direct-access path below must be confirmed in the
 * full file.
 */
10165 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10166 MonoInst *iargs [4];
10168 GSHAREDVT_FAILURE (op);
/* Wrapper args: object, class, field, and the adjusted field offset (value types
 * subtract the MonoObject header since they have no object header at their address). */
10170 iargs [0] = sp [0];
10171 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10172 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10173 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10174 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10175 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10176 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10177 CHECK_CFG_EXCEPTION;
10179 g_assert (costs > 0);
10181 cfg->real_offset += 5;
10185 inline_costs += costs;
10187 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
/* If the value on the stack is a vtype, rewrite sp[0] into the address of the
 * backing variable so field access below can go through a pointer. */
10193 if (sp [0]->type == STACK_VTYPE) {
10196 /* Have to compute the address of the variable */
10198 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10200 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10202 g_assert (var->klass == klass);
10204 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
/* LDFLDA: push the field address (with an explicit null check for object refs).
 * Special-cased "magic" TLS fields get their own access sequence. */
10208 if (op == CEE_LDFLDA) {
10209 if (is_magic_tls_access (field)) {
10210 GSHAREDVT_FAILURE (*ip);
10212 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10214 if (sp [0]->type == STACK_OBJ) {
10215 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10216 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10219 dreg = alloc_ireg_mp (cfg);
/* gsharedvt classes have runtime-determined field offsets, fetched from the RGCTX;
 * otherwise the offset (foffset) is a compile-time constant. */
10221 if (mini_is_gsharedvt_klass (cfg, klass)) {
10222 MonoInst *offset_ins;
10224 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10225 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10227 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10229 ins->klass = mono_class_from_mono_type (field->type);
10230 ins->type = STACK_MP;
/* LDFLD: emit the actual load; mark it faulting unless the base is a known
 * address (OP_LDADDR), so the null check can be folded into the load. */
10236 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10238 if (mini_is_gsharedvt_klass (cfg, klass)) {
10239 MonoInst *offset_ins;
10241 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10242 dreg = alloc_ireg_mp (cfg);
10243 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10244 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10246 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10248 load->flags |= ins_flag;
10249 if (sp [0]->opcode != OP_LDADDR)
10250 load->flags |= MONO_INST_FAULT;
/*
 * Static field access (CEE_LDSFLD/LDSFLDA/STSFLD, handler head elided):
 * compute the address of the static field into `ins`, choosing between the
 * TLS fast path, icall helpers, RGCTX lookup, or a direct vtable pointer.
 */
10264 * We can only support shared generic static
10265 * field access on architectures where the
10266 * trampoline code has been extended to handle
10267 * the generic class init.
10269 #ifndef MONO_ARCH_VTABLE_REG
10270 GENERIC_SHARING_FAILURE (op);
10273 context_used = mini_class_check_context_used (cfg, klass);
10275 ftype = mono_field_get_type (field);
/* Literal (const) fields have no storage; handled elsewhere (branch target elided). */
10277 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10280 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10281 * to be called here.
10283 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10284 mono_class_vtable (cfg->domain, klass);
10285 CHECK_TYPELOAD (klass);
/* Look up any thread/context-static slot recorded for this field; the domain
 * lock guards the special_static_fields hash table. */
10287 mono_domain_lock (cfg->domain);
10288 if (cfg->domain->special_static_fields)
10289 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10290 mono_domain_unlock (cfg->domain);
10292 is_special_static = mono_class_field_is_special_static (field);
/* NOTE(review): the 0x80000000 bit in `addr` appears to distinguish a second kind
 * of special-static encoding (see the mono_get_special_static_data fallback below)
 * — confirm the encoding against the runtime's thread-static allocator. */
10294 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10295 thread_ins = mono_get_thread_intrinsic (cfg);
10299 /* Generate IR to compute the field address */
10300 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10302 * Fast access to TLS data
10303 * Inline version of get_thread_static_data () in
10307 int idx, static_data_reg, array_reg, dreg;
10309 GSHAREDVT_FAILURE (op);
/* Decoding of the packed offset, open-coded below:
10311 // offset &= 0x7fffffff;
10312 // idx = (offset >> 24) - 1;
10313 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
 */
10314 MONO_ADD_INS (cfg->cbb, thread_ins);
10315 static_data_reg = alloc_ireg (cfg);
10316 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10318 if (cfg->compile_aot) {
10319 int offset_reg, offset2_reg, idx_reg;
10321 /* For TLS variables, this will return the TLS offset */
/* AOT: the offset is a patch-time constant, so the idx/offset split is computed in IR. */
10322 EMIT_NEW_SFLDACONST (cfg, ins, field);
10323 offset_reg = ins->dreg;
10324 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10325 idx_reg = alloc_ireg (cfg);
10326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10327 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10328 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10329 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10330 array_reg = alloc_ireg (cfg);
10331 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10332 offset2_reg = alloc_ireg (cfg);
10333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10334 dreg = alloc_ireg (cfg);
10335 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
/* JIT: decode the packed offset at compile time and bake the constants in. */
10337 offset = (gsize)addr & 0x7fffffff;
10338 idx = (offset >> 24) - 1;
10340 array_reg = alloc_ireg (cfg);
10341 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10342 dreg = alloc_ireg (cfg);
10343 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
/* Shared/AOT special statics: resolve the address at runtime through an icall. */
10345 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10346 (cfg->compile_aot && is_special_static) ||
10347 (context_used && is_special_static)) {
10348 MonoInst *iargs [2];
10350 g_assert (field->parent);
10351 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10352 if (context_used) {
10353 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10354 field, MONO_RGCTX_INFO_CLASS_FIELD);
10356 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10358 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Generic-shared: base address comes from the RGCTX; add the field offset. */
10359 } else if (context_used) {
10360 MonoInst *static_data;
10363 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10364 method->klass->name_space, method->klass->name, method->name,
10365 depth, field->offset);
10368 if (mono_class_needs_cctor_run (klass, method))
10369 emit_generic_class_init (cfg, klass);
10372 * The pointer we're computing here is
10374 * super_info.static_data + field->offset
10376 static_data = emit_get_rgctx_klass (cfg, context_used,
10377 klass, MONO_RGCTX_INFO_STATIC_DATA);
10379 if (mini_is_gsharedvt_klass (cfg, klass)) {
10380 MonoInst *offset_ins;
10382 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10383 dreg = alloc_ireg_mp (cfg);
10384 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10385 } else if (field->offset == 0) {
10388 int addr_reg = mono_alloc_preg (cfg);
10389 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10391 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10392 MonoInst *iargs [2];
10394 g_assert (field->parent);
10395 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10396 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10397 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
/* Default path: direct pointer into the class's static data area, after making
 * sure the class initializer has run (or emitting a class-init call if not). */
10399 MonoVTable *vtable = NULL;
10401 if (!cfg->compile_aot)
10402 vtable = mono_class_vtable (cfg->domain, klass);
10403 CHECK_TYPELOAD (klass);
10406 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10407 if (!(g_slist_find (class_inits, klass))) {
10408 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10409 if (cfg->verbose_level > 2)
10410 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
/* Remember classes we already emitted an init call for, to avoid duplicates. */
10411 class_inits = g_slist_prepend (class_inits, klass);
10414 if (cfg->run_cctors) {
10416 /* This makes so that inline cannot trigger */
10417 /* .cctors: too many apps depend on them */
10418 /* running with a specific order... */
10420 if (! vtable->initialized)
10421 INLINE_FAILURE ("class init");
10422 ex = mono_runtime_class_init_full (vtable, FALSE);
10424 set_exception_object (cfg, ex);
10425 goto exception_exit;
10429 if (cfg->compile_aot)
10430 EMIT_NEW_SFLDACONST (cfg, ins, field);
10433 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10435 EMIT_NEW_PCONST (cfg, ins, addr);
/* Special-static fallback (high bit set in addr): resolve through an icall. */
10438 MonoInst *iargs [1];
10439 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10440 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
/*
 * With the static field address in `ins`, emit the actual operation:
 * LDSFLDA pushes the address, STSFLD stores through it, LDSFLD loads —
 * with a constant-folding fast path for initialized readonly fields.
 */
10444 /* Generate IR to do the actual load/store operation */
10446 if (op == CEE_LDSFLDA) {
10447 ins->klass = mono_class_from_mono_type (ftype);
10448 ins->type = STACK_PTR;
10450 } else if (op == CEE_STSFLD) {
10453 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10454 store->flags |= ins_flag;
10456 gboolean is_const = FALSE;
10457 MonoVTable *vtable = NULL;
10458 gpointer addr = NULL;
10460 if (!context_used) {
10461 vtable = mono_class_vtable (cfg->domain, klass);
10462 CHECK_TYPELOAD (klass);
/* Readonly (initonly) field whose class is already initialized: read the value
 * now and emit it as a constant instead of a runtime load. AOT can override
 * the address via mono_aot_readonly_field_override. */
10464 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10465 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10466 int ro_type = ftype->type;
10468 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10469 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10470 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10473 GSHAREDVT_FAILURE (op);
10475 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
/* Fold per element type; switch head/breaks elided in this extract. */
10478 case MONO_TYPE_BOOLEAN:
10480 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10484 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10487 case MONO_TYPE_CHAR:
10489 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10493 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10498 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10502 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10507 case MONO_TYPE_PTR:
10508 case MONO_TYPE_FNPTR:
10509 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10510 type_to_eval_stack_type ((cfg), field->type, *sp);
10513 case MONO_TYPE_STRING:
10514 case MONO_TYPE_OBJECT:
10515 case MONO_TYPE_CLASS:
10516 case MONO_TYPE_SZARRAY:
10517 case MONO_TYPE_ARRAY:
/* Reference-typed constants are only safe with a non-moving GC: a moving
 * collector could relocate the object after we embed its pointer. */
10518 if (!mono_gc_is_moving ()) {
10519 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10520 type_to_eval_stack_type ((cfg), field->type, *sp);
10528 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10533 case MONO_TYPE_VALUETYPE:
/* Non-const fallback: ordinary load through the computed address. */
10543 CHECK_STACK_OVF (1);
10545 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10546 load->flags |= ins_flag;
/*
 * Store-through-address opcode (handler head elided; presumably CEE_STOBJ —
 * confirm against the full file): store sp[1] to the address in sp[0],
 * emitting a GC write barrier when the stored type is a reference.
 */
10559 token = read32 (ip + 1);
10560 klass = mini_get_class (method, token, generic_context);
10561 CHECK_TYPELOAD (klass);
10562 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10563 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
/* Write-barrier wrappers must not recurse into themselves. */
10564 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10565 generic_class_is_reference_type (cfg, klass)) {
10566 /* insert call to write barrier */
10567 emit_write_barrier (cfg, sp [0], sp [1]);
/*
 * CEE_NEWARR: allocate a one-dimensional array of `klass` with the length on
 * the stack, then (optimization) open-code the InitializeArray pattern by
 * memcpy-ing the RVA data blob directly into the new array.
 */
10579 const char *data_ptr;
10581 guint32 field_token;
10587 token = read32 (ip + 1);
10589 klass = mini_get_class (method, token, generic_context);
10590 CHECK_TYPELOAD (klass);
10592 context_used = mini_class_check_context_used (cfg, klass);
/* A 64-bit length must be checked-converted to U4 before allocation. */
10594 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10595 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10596 ins->sreg1 = sp [0]->dreg;
10597 ins->type = STACK_I4;
10598 ins->dreg = alloc_ireg (cfg);
10599 MONO_ADD_INS (cfg->cbb, ins);
10600 *sp = mono_decompose_opcode (cfg, ins);
/* Generic-shared: the array vtable comes from the RGCTX; allocate via the
 * managed allocator when available, else the mono_array_new_specific icall. */
10603 if (context_used) {
10604 MonoInst *args [3];
10605 MonoClass *array_class = mono_array_class_get (klass, 1);
10606 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10608 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10611 args [0] = emit_get_rgctx_klass (cfg, context_used,
10612 array_class, MONO_RGCTX_INFO_VTABLE);
10617 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10619 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10621 if (cfg->opt & MONO_OPT_SHARED) {
10622 /* Decompose now to avoid problems with references to the domainvar */
10623 MonoInst *iargs [3];
10625 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10626 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10627 iargs [2] = sp [0];
10629 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10631 /* Decompose later since it is needed by abcrem */
10632 MonoClass *array_type = mono_array_class_get (klass, 1);
10633 mono_class_vtable (cfg->domain, array_type);
10634 CHECK_TYPELOAD (array_type);
10636 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10637 ins->dreg = alloc_ireg_ref (cfg);
10638 ins->sreg1 = sp [0]->dreg;
10639 ins->inst_newa_class = klass;
10640 ins->type = STACK_OBJ;
10641 ins->klass = array_type;
10642 MONO_ADD_INS (cfg->cbb, ins);
10643 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10644 cfg->cbb->has_array_access = TRUE;
10646 /* Needed so mono_emit_load_get_addr () gets called */
10647 mono_get_got_var (cfg);
10657 * we inline/optimize the initialization sequence if possible.
10658 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10659 * for small sizes open code the memcpy
10660 * ensure the rva field is big enough
/* Pattern-match a constant-length newarr followed by InitializeArray and
 * replace it with a direct memcpy from the metadata RVA blob. */
10662 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10663 MonoMethod *memcpy_method = get_memcpy_method ();
10664 MonoInst *iargs [3];
10665 int add_reg = alloc_ireg_mp (cfg);
10667 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
/* AOT cannot embed the raw data pointer; emit an RVA patch instead. */
10668 if (cfg->compile_aot) {
10669 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10671 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10673 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10674 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* CEE_LDLEN: push the array length; OP_LDLEN carries an implicit null check
 * (MONO_INST_FAULT) that survives decomposition. */
10683 if (sp [0]->type != STACK_OBJ)
10686 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10687 ins->dreg = alloc_preg (cfg);
10688 ins->sreg1 = sp [0]->dreg;
10689 ins->type = STACK_I4;
10690 /* This flag will be inherited by the decomposition */
10691 ins->flags |= MONO_INST_FAULT;
10692 MONO_ADD_INS (cfg->cbb, ins);
10693 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10694 cfg->cbb->has_array_access = TRUE;
/* CEE_LDELEMA: push the address of an array element, with an exact array-type
 * check for reference element types (unless `readonly.` prefix or a wrapper). */
10702 if (sp [0]->type != STACK_OBJ)
10705 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10707 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10708 CHECK_TYPELOAD (klass);
10709 /* we need to make sure that this array is exactly the type it needs
10710 * to be for correctness. the wrappers are lax with their usage
10711 * so we need to ignore them here
10713 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10714 MonoClass *array_class = mono_array_class_get (klass, 1);
10715 mini_emit_check_array_type (cfg, sp [0], array_class);
10716 CHECK_TYPELOAD (array_class);
/* TRUE => emit the bounds check as part of the address computation. */
10720 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
/*
 * CEE_LDELEM and its typed shorthands: load an array element. The element
 * class comes from the token (LDELEM) or from the opcode itself.
 */
10725 case CEE_LDELEM_I1:
10726 case CEE_LDELEM_U1:
10727 case CEE_LDELEM_I2:
10728 case CEE_LDELEM_U2:
10729 case CEE_LDELEM_I4:
10730 case CEE_LDELEM_U4:
10731 case CEE_LDELEM_I8:
10733 case CEE_LDELEM_R4:
10734 case CEE_LDELEM_R8:
10735 case CEE_LDELEM_REF: {
10741 if (*ip == CEE_LDELEM) {
10743 token = read32 (ip + 1);
10744 klass = mini_get_class (method, token, generic_context);
10745 CHECK_TYPELOAD (klass);
10746 mono_class_init (klass);
10749 klass = array_access_to_klass (*ip);
10751 if (sp [0]->type != STACK_OBJ)
10754 cfg->flags |= MONO_CFG_HAS_LDELEMA;
/* gsharedvt variable-size elements: load via address + OP_LOADV_MEMBASE. */
10756 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10757 // FIXME-VT: OP_ICONST optimization
10758 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10759 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10760 ins->opcode = OP_LOADV_MEMBASE;
/* Constant index: fold the element offset and emit a single bounds-checked load. */
10761 } else if (sp [1]->opcode == OP_ICONST) {
10762 int array_reg = sp [0]->dreg;
10763 int index_reg = sp [1]->dreg;
10764 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10766 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10767 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10769 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10770 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
/* LDELEM consumes a 4-byte token; advance ip accordingly (increment elided). */
10773 if (*ip == CEE_LDELEM)
/*
 * CEE_STELEM and its typed shorthands: store an array element; the heavy
 * lifting (bounds check, covariance check, write barrier) is in
 * emit_array_store.
 */
10780 case CEE_STELEM_I1:
10781 case CEE_STELEM_I2:
10782 case CEE_STELEM_I4:
10783 case CEE_STELEM_I8:
10784 case CEE_STELEM_R4:
10785 case CEE_STELEM_R8:
10786 case CEE_STELEM_REF:
10791 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10793 if (*ip == CEE_STELEM) {
10795 token = read32 (ip + 1);
10796 klass = mini_get_class (method, token, generic_context);
10797 CHECK_TYPELOAD (klass);
10798 mono_class_init (klass);
10801 klass = array_access_to_klass (*ip);
10803 if (sp [0]->type != STACK_OBJ)
/* TRUE => safety checks (bounds/covariance) are emitted by the helper. */
10806 emit_array_store (cfg, klass, sp, TRUE);
10808 if (*ip == CEE_STELEM)
/* CEE_CKFINITE: throw ArithmeticException if the R8 on the stack is NaN or
 * infinite; otherwise pass the value through unchanged. */
10815 case CEE_CKFINITE: {
10819 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10820 ins->sreg1 = sp [0]->dreg;
10821 ins->dreg = alloc_freg (cfg);
10822 ins->type = STACK_R8;
10823 MONO_ADD_INS (bblock, ins);
10825 *sp++ = mono_decompose_opcode (cfg, ins);
/*
 * CEE_REFANYVAL: extract the value address from a TypedReference after
 * verifying its klass field matches the expected type (InvalidCastException
 * otherwise).
 */
10830 case CEE_REFANYVAL: {
10831 MonoInst *src_var, *src;
10833 int klass_reg = alloc_preg (cfg);
10834 int dreg = alloc_preg (cfg);
10836 GSHAREDVT_FAILURE (*ip);
10839 MONO_INST_NEW (cfg, ins, *ip);
10842 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10843 CHECK_TYPELOAD (klass);
10844 mono_class_init (klass);
10846 context_used = mini_class_check_context_used (cfg, klass);
/* Materialize the TypedReference into an addressable variable, then load its
 * MonoTypedRef.klass field for the type check. */
10849 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10851 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10852 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10853 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
/* Generic-shared: compare against the runtime klass from the RGCTX. */
10855 if (context_used) {
10856 MonoInst *klass_ins;
10858 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10859 klass, MONO_RGCTX_INFO_KLASS);
10862 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10863 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10865 mini_emit_class_check (cfg, klass_reg, klass);
10867 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10868 ins->type = STACK_MP;
/*
 * CEE_MKREFANY: build a TypedReference {klass, type, value} in a temporary
 * local from the address on the stack and push it as a vtype.
 */
10873 case CEE_MKREFANY: {
10874 MonoInst *loc, *addr;
10876 GSHAREDVT_FAILURE (*ip);
10879 MONO_INST_NEW (cfg, ins, *ip);
10882 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10883 CHECK_TYPELOAD (klass);
10884 mono_class_init (klass);
10886 context_used = mini_class_check_context_used (cfg, klass);
10888 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10889 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
/* Three ways to obtain klass/type pointers: RGCTX (shared generics), AOT
 * patch constants, or immediate pointers (plain JIT). In the first two,
 * `type` is derived from `klass` by adding the byval_arg offset. */
10891 if (context_used) {
10892 MonoInst *const_ins;
10893 int type_reg = alloc_preg (cfg);
10895 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10898 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10899 } else if (cfg->compile_aot) {
10900 int const_reg = alloc_preg (cfg);
10901 int type_reg = alloc_preg (cfg);
10903 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10904 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10908 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10909 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10911 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10913 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10914 ins->type = STACK_VTYPE;
10915 ins->klass = mono_defaults.typed_reference_class;
/*
 * CEE_LDTOKEN: push a Runtime{Type,Method,Field}Handle for the token.
 * Handles wrapper methods (token indexes wrapper data), shared generics
 * (context_used / RGCTX), the MONO_OPT_SHARED icall path, and the common
 * `ldtoken; call GetTypeFromHandle` peephole that pushes the System.Type
 * object directly.
 */
10920 case CEE_LDTOKEN: {
10922 MonoClass *handle_class;
10924 CHECK_STACK_OVF (1);
10927 n = read32 (ip + 1);
/* In dynamic-method/synchronized wrappers the token indexes the wrapper's
 * own data table rather than metadata. */
10929 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10930 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10931 handle = mono_method_get_wrapper_data (method, n);
10932 handle_class = mono_method_get_wrapper_data (method, n + 1);
10933 if (handle_class == mono_defaults.typehandle_class)
10934 handle = &((MonoClass*)handle)->byval_arg;
10937 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10941 mono_class_init (handle_class);
/* Under generic sharing, work out whether the handle depends on the method's
 * generic context (and therefore needs an RGCTX lookup at runtime). */
10942 if (cfg->generic_sharing_context) {
10943 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10944 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10945 /* This case handles ldtoken
10946 of an open type, like for
10949 } else if (handle_class == mono_defaults.typehandle_class) {
10950 /* If we get a MONO_TYPE_CLASS
10951 then we need to provide the
10953 instantiation of it. */
10954 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10957 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10958 } else if (handle_class == mono_defaults.fieldhandle_class)
10959 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10960 else if (handle_class == mono_defaults.methodhandle_class)
10961 context_used = mini_method_check_context_used (cfg, handle);
10963 g_assert_not_reached ();
/* MONO_OPT_SHARED: resolve the token at runtime through an icall into a
 * handle-typed temporary (vtvar). */
10966 if ((cfg->opt & MONO_OPT_SHARED) &&
10967 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10968 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10969 MonoInst *addr, *vtvar, *iargs [3];
10970 int method_context_used;
10972 method_context_used = mini_method_check_context_used (cfg, method);
10974 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10976 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10977 EMIT_NEW_ICONST (cfg, iargs [1], n);
10978 if (method_context_used) {
10979 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10980 method, MONO_RGCTX_INFO_METHOD);
10981 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10983 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10984 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10986 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10988 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10990 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/* Peephole: ldtoken immediately followed by Type.GetTypeFromHandle in the
 * same basic block — push the System.Type object directly, skipping the
 * handle round-trip. */
10992 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10993 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10994 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10995 (cmethod->klass == mono_defaults.systemtype_class) &&
10996 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10997 MonoClass *tclass = mono_class_from_mono_type (handle);
10999 mono_class_init (tclass);
11000 if (context_used) {
11001 ins = emit_get_rgctx_klass (cfg, context_used,
11002 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11003 } else if (cfg->compile_aot) {
11004 if (method->wrapper_type) {
11005 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
11006 /* Special case for static synchronized wrappers */
11007 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11009 /* FIXME: n is not a normal token */
11011 EMIT_NEW_PCONST (cfg, ins, NULL);
11014 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11017 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11019 ins->type = STACK_OBJ;
11020 ins->klass = cmethod->klass;
/* General case: store the raw handle into a handle-typed temp and reload it
 * so the stack slot has the proper vtype. */
11023 MonoInst *addr, *vtvar;
11025 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11027 if (context_used) {
11028 if (handle_class == mono_defaults.typehandle_class) {
11029 ins = emit_get_rgctx_klass (cfg, context_used,
11030 mono_class_from_mono_type (handle),
11031 MONO_RGCTX_INFO_TYPE);
11032 } else if (handle_class == mono_defaults.methodhandle_class) {
11033 ins = emit_get_rgctx_method (cfg, context_used,
11034 handle, MONO_RGCTX_INFO_METHOD);
11035 } else if (handle_class == mono_defaults.fieldhandle_class) {
11036 ins = emit_get_rgctx_field (cfg, context_used,
11037 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11039 g_assert_not_reached ();
11041 } else if (cfg->compile_aot) {
11042 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11044 EMIT_NEW_PCONST (cfg, ins, handle);
11046 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11047 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11048 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
/* CEE_THROW (case label elided): emit the throw, mark the block out-of-line
 * (cold), and terminate the basic block — code after a throw is unreachable. */
11058 MONO_INST_NEW (cfg, ins, OP_THROW);
11060 ins->sreg1 = sp [0]->dreg;
11062 bblock->out_of_line = TRUE;
11063 MONO_ADD_INS (bblock, ins);
11064 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11065 MONO_ADD_INS (bblock, ins);
11068 link_bblock (cfg, bblock, end_bblock);
11069 start_new_bblock = 1;
11071 case CEE_ENDFINALLY:
11072 /* mono_save_seq_point_info () depends on this */
11073 if (sp != stack_start)
11074 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11075 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11076 MONO_ADD_INS (bblock, ins);
11078 start_new_bblock = 1;
11081 * Control will leave the method so empty the stack, otherwise
11082 * the next basic block will start with a nonempty stack.
/* Pop-loop body elided in this extract. */
11084 while (sp != stack_start) {
/*
 * CEE_LEAVE / CEE_LEAVE_S: branch out of a protected region. Empties the
 * eval stack, rethrows pending thread-abort exceptions when leaving a catch
 * block, calls any intervening finally handlers, then branches to the target.
 */
11089 case CEE_LEAVE_S: {
11092 if (*ip == CEE_LEAVE) {
11094 target = ip + 5 + (gint32)read32(ip + 1);
11097 target = ip + 2 + (signed char)(ip [1]);
11100 /* empty the stack */
11101 while (sp != stack_start) {
11106 * If this leave statement is in a catch block, check for a
11107 * pending exception, and rethrow it if necessary.
11108 * We avoid doing this in runtime invoke wrappers, since those are called
11109 * by native code which excepts the wrapper to catch all exceptions.
11111 for (i = 0; i < header->num_clauses; ++i) {
11112 MonoExceptionClause *clause = &header->clauses [i];
11115 * Use <= in the final comparison to handle clauses with multiple
11116 * leave statements, like in bug #78024.
11117 * The ordering of the exception clauses guarantees that we find the
11118 * innermost clause.
11120 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11122 MonoBasicBlock *dont_throw;
11127 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
/* Ask the runtime for a pending undeniable (thread-abort) exception. */
11130 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11132 NEW_BBLOCK (cfg, dont_throw);
11135 * Currently, we always rethrow the abort exception, despite the
11136 * fact that this is not correct. See thread6.cs for an example.
11137 * But propagating the abort exception is more important than
11138 * getting the sematics right.
11140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11141 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11142 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11144 MONO_START_BB (cfg, dont_throw);
/* Emit OP_CALL_HANDLER for every finally clause between here and the target. */
11149 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11151 MonoExceptionClause *clause;
11153 for (tmp = handlers; tmp; tmp = tmp->next) {
11154 clause = tmp->data;
11155 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11157 link_bblock (cfg, bblock, tblock);
11158 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11159 ins->inst_target_bb = tblock;
11160 ins->inst_eh_block = clause;
11161 MONO_ADD_INS (bblock, ins);
11162 bblock->has_call_handler = 1;
11163 if (COMPILE_LLVM (cfg)) {
11164 MonoBasicBlock *target_bb;
11167 * Link the finally bblock with the target, since it will
11168 * conceptually branch there.
11169 * FIXME: Have to link the bblock containing the endfinally.
11171 GET_BBLOCK (cfg, target_bb, target);
11172 link_bblock (cfg, tblock, target_bb);
11175 g_list_free (handlers);
/* Finally, the unconditional branch to the leave target. */
11178 MONO_INST_NEW (cfg, ins, OP_BR);
11179 MONO_ADD_INS (bblock, ins);
11180 GET_BBLOCK (cfg, tblock, target);
11181 link_bblock (cfg, bblock, tblock);
11182 ins->inst_target_bb = tblock;
11183 start_new_bblock = 1;
11185 if (*ip == CEE_LEAVE)
/*
 * MONO_CUSTOM_PREFIX: Mono-internal two-byte opcodes, only legal inside
 * runtime-generated wrappers (asserted below). Each sub-case reads its
 * operand token with read32 (ip + 2) from the wrapper's data table.
 */
11194 * Mono specific opcodes
11196 case MONO_CUSTOM_PREFIX: {
11198 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
/* CEE_MONO_ICALL: direct call to a registered JIT icall by address. */
11202 case CEE_MONO_ICALL: {
11204 MonoJitICallInfo *info;
11206 token = read32 (ip + 2);
11207 func = mono_method_get_wrapper_data (method, token);
11208 info = mono_find_jit_icall_by_addr (func);
11210 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11213 CHECK_STACK (info->sig->param_count);
11214 sp -= info->sig->param_count;
11216 ins = mono_emit_jit_icall (cfg, info->func, sp);
11217 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11221 inline_costs += 10 * num_calls++;
/* CEE_MONO_LDPTR: push a raw pointer from the wrapper data table. */
11225 case CEE_MONO_LDPTR: {
11228 CHECK_STACK_OVF (1);
11230 token = read32 (ip + 2);
11232 ptr = mono_method_get_wrapper_data (method, token);
11233 /* FIXME: Generalize this */
/* AOT cannot embed process-specific pointers; special-case the interruption
 * flag via a patch constant. */
11234 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11235 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11240 EMIT_NEW_PCONST (cfg, ins, ptr);
11243 inline_costs += 10 * num_calls++;
11244 /* Can't embed random pointers into AOT code */
/* CEE_MONO_JIT_ICALL_ADDR: push the address of a JIT icall (by name, so AOT
 * can patch it). */
11248 case CEE_MONO_JIT_ICALL_ADDR: {
11249 MonoJitICallInfo *callinfo;
11252 CHECK_STACK_OVF (1);
11254 token = read32 (ip + 2);
11256 ptr = mono_method_get_wrapper_data (method, token);
11257 callinfo = mono_find_jit_icall_by_addr (ptr);
11258 g_assert (callinfo);
11259 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11262 inline_costs += 10 * num_calls++;
/* CEE_MONO_ICALL_ADDR: push the native address of an internal call method. */
11265 case CEE_MONO_ICALL_ADDR: {
11266 MonoMethod *cmethod;
11269 CHECK_STACK_OVF (1);
11271 token = read32 (ip + 2);
11273 cmethod = mono_method_get_wrapper_data (method, token);
11275 if (cfg->compile_aot) {
11276 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11278 ptr = mono_lookup_internal_call (cmethod);
11280 EMIT_NEW_PCONST (cfg, ins, ptr);
/* CEE_MONO_VTADDR: push the address of the vtype on top of the stack. */
11286 case CEE_MONO_VTADDR: {
11287 MonoInst *src_var, *src;
11293 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11294 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
/* CEE_MONO_NEWOBJ: allocate an object of the given class without running a
 * constructor, via the mono_object_new icall. */
11299 case CEE_MONO_NEWOBJ: {
11300 MonoInst *iargs [2];
11302 CHECK_STACK_OVF (1);
11304 token = read32 (ip + 2);
11305 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11306 mono_class_init (klass);
11307 NEW_DOMAINCONST (cfg, iargs [0]);
11308 MONO_ADD_INS (cfg->cbb, iargs [0]);
11309 NEW_CLASSCONST (cfg, iargs [1], klass);
11310 MONO_ADD_INS (cfg->cbb, iargs [1]);
11311 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11313 inline_costs += 10 * num_calls++;
/* CEE_MONO_OBJADDR: reinterpret an object reference as a managed pointer. */
11316 case CEE_MONO_OBJADDR:
11319 MONO_INST_NEW (cfg, ins, OP_MOVE);
11320 ins->dreg = alloc_ireg_mp (cfg);
11321 ins->sreg1 = sp [0]->dreg;
11322 ins->type = STACK_MP;
11323 MONO_ADD_INS (cfg->cbb, ins);
11327 case CEE_MONO_LDNATIVEOBJ:
11329 * Similar to LDOBJ, but instead load the unmanaged
11330 * representation of the vtype to the stack.
11335 token = read32 (ip + 2);
11336 klass = mono_method_get_wrapper_data (method, token);
11337 g_assert (klass->valuetype);
11338 mono_class_init (klass);
11341 MonoInst *src, *dest, *temp;
/* Copy into a pinvoke-layout temporary so the stack slot has native layout. */
11344 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11345 temp->backend.is_pinvoke = 1;
11346 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11347 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11349 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11350 dest->type = STACK_VTYPE;
11351 dest->klass = klass;
11357 case CEE_MONO_RETOBJ: {
11359 * Same as RET, but return the native representation of a vtype
11362 g_assert (cfg->ret);
11363 g_assert (mono_method_signature (method)->pinvoke);
11368 token = read32 (ip + 2);
11369 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11371 if (!cfg->vret_addr) {
11372 g_assert (cfg->ret_var_is_local);
11374 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11376 EMIT_NEW_RETLOADA (cfg, ins);
11378 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11380 if (sp != stack_start)
11383 MONO_INST_NEW (cfg, ins, OP_BR);
11384 ins->inst_target_bb = end_bblock;
11385 MONO_ADD_INS (bblock, ins);
11386 link_bblock (cfg, bblock, end_bblock);
11387 start_new_bblock = 1;
/* CEE_MONO_CISINST / CEE_MONO_CCASTCLASS: isinst/castclass variants used by
 * wrappers (handled by dedicated helpers). */
11391 case CEE_MONO_CISINST:
11392 case CEE_MONO_CCASTCLASS: {
11397 token = read32 (ip + 2);
11398 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11399 if (ip [1] == CEE_MONO_CISINST)
11400 ins = handle_cisinst (cfg, klass, sp [0]);
11402 ins = handle_ccastclass (cfg, klass, sp [0]);
/* CEE_MONO_SAVE_LMF / RESTORE_LMF: only on architectures with LMF opcodes. */
11408 case CEE_MONO_SAVE_LMF:
11409 case CEE_MONO_RESTORE_LMF:
11410 #ifdef MONO_ARCH_HAVE_LMF_OPS
11411 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11412 MONO_ADD_INS (bblock, ins);
11413 cfg->need_lmf_area = TRUE;
11417 case CEE_MONO_CLASSCONST:
11418 CHECK_STACK_OVF (1);
11420 token = read32 (ip + 2);
11421 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11424 inline_costs += 10 * num_calls++;
11426 case CEE_MONO_NOT_TAKEN:
11427 bblock->out_of_line = TRUE;
/* CEE_MONO_TLS: read a runtime TLS slot; falls back to a patchable
 * OP_TLS_GET under AOT when no intrinsic is available. */
11430 case CEE_MONO_TLS: {
11433 CHECK_STACK_OVF (1);
11435 key = (gint32)read32 (ip + 2);
11436 g_assert (key < TLS_KEY_NUM);
11438 ins = mono_create_tls_get (cfg, key);
11440 if (cfg->compile_aot) {
11442 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11443 ins->dreg = alloc_preg (cfg);
11444 ins->type = STACK_PTR;
11446 g_assert_not_reached ();
11449 ins->type = STACK_PTR;
11450 MONO_ADD_INS (bblock, ins);
/* CEE_MONO_DYN_CALL: inline dynamic-call opcode — a trampoline would add a
 * stack frame and confuse exception handling. */
11455 case CEE_MONO_DYN_CALL: {
11456 MonoCallInst *call;
11458 /* It would be easier to call a trampoline, but that would put an
11459 * extra frame on the stack, confusing exception handling. So
11460 * implement it inline using an opcode for now.
11463 if (!cfg->dyn_call_var) {
11464 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11465 /* prevent it from being register allocated */
11466 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11469 /* Has to use a call inst since it local regalloc expects it */
11470 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11471 ins = (MonoInst*)call;
11473 ins->sreg1 = sp [0]->dreg;
11474 ins->sreg2 = sp [1]->dreg;
11475 MONO_ADD_INS (bblock, ins);
11477 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11480 inline_costs += 10 * num_calls++;
11484 case CEE_MONO_MEMORY_BARRIER: {
11486 emit_memory_barrier (cfg, (int)read32 (ip + 1));
/* CEE_MONO_JIT_ATTACH: body continues past this extract. */
11490 case CEE_MONO_JIT_ATTACH: {
11491 MonoInst *args [16];
11492 MonoInst *ad_ins, *lmf_ins;
11493 MonoBasicBlock *next_bb = NULL;
11495 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11497 EMIT_NEW_PCONST (cfg, ins, NULL);
11498 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11504 ad_ins = mono_get_domain_intrinsic (cfg);
11505 lmf_ins = mono_get_lmf_intrinsic (cfg);
11508 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11509 NEW_BBLOCK (cfg, next_bb);
11511 MONO_ADD_INS (cfg->cbb, ad_ins);
11512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11515 MONO_ADD_INS (cfg->cbb, lmf_ins);
11516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11517 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11520 if (cfg->compile_aot) {
11521 /* AOT code is only used in the root domain */
11522 EMIT_NEW_PCONST (cfg, args [0], NULL);
11524 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11526 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11527 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11530 MONO_START_BB (cfg, next_bb);
11536 case CEE_MONO_JIT_DETACH: {
11537 MonoInst *args [16];
11539 /* Restore the original domain */
11540 dreg = alloc_ireg (cfg);
11541 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11542 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11547 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11553 case CEE_PREFIX1: {
11556 case CEE_ARGLIST: {
11557 /* somewhat similar to LDTOKEN */
11558 MonoInst *addr, *vtvar;
11559 CHECK_STACK_OVF (1);
11560 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11562 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11563 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11565 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11566 ins->type = STACK_VTYPE;
11567 ins->klass = mono_defaults.argumenthandle_class;
11580 * The following transforms:
11581 * CEE_CEQ into OP_CEQ
11582 * CEE_CGT into OP_CGT
11583 * CEE_CGT_UN into OP_CGT_UN
11584 * CEE_CLT into OP_CLT
11585 * CEE_CLT_UN into OP_CLT_UN
11587 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11589 MONO_INST_NEW (cfg, ins, cmp->opcode);
11591 cmp->sreg1 = sp [0]->dreg;
11592 cmp->sreg2 = sp [1]->dreg;
11593 type_from_op (cmp, sp [0], sp [1]);
11595 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11596 cmp->opcode = OP_LCOMPARE;
11597 else if (sp [0]->type == STACK_R8)
11598 cmp->opcode = OP_FCOMPARE;
11600 cmp->opcode = OP_ICOMPARE;
11601 MONO_ADD_INS (bblock, cmp);
11602 ins->type = STACK_I4;
11603 ins->dreg = alloc_dreg (cfg, ins->type);
11604 type_from_op (ins, sp [0], sp [1]);
11606 if (cmp->opcode == OP_FCOMPARE) {
11608 * The backends expect the fceq opcodes to do the
11611 cmp->opcode = OP_NOP;
11612 ins->sreg1 = cmp->sreg1;
11613 ins->sreg2 = cmp->sreg2;
11615 MONO_ADD_INS (bblock, ins);
11621 MonoInst *argconst;
11622 MonoMethod *cil_method;
11624 CHECK_STACK_OVF (1);
11626 n = read32 (ip + 2);
11627 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11628 if (!cmethod || mono_loader_get_last_error ())
11630 mono_class_init (cmethod->klass);
11632 mono_save_token_info (cfg, image, n, cmethod);
11634 context_used = mini_method_check_context_used (cfg, cmethod);
11636 cil_method = cmethod;
11637 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11638 METHOD_ACCESS_FAILURE;
11640 if (mono_security_cas_enabled ()) {
11641 if (check_linkdemand (cfg, method, cmethod))
11642 INLINE_FAILURE ("linkdemand");
11643 CHECK_CFG_EXCEPTION;
11644 } else if (mono_security_core_clr_enabled ()) {
11645 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11649 * Optimize the common case of ldftn+delegate creation
11651 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11652 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11653 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11654 MonoInst *target_ins;
11655 MonoMethod *invoke;
11656 int invoke_context_used;
11658 invoke = mono_get_delegate_invoke (ctor_method->klass);
11659 if (!invoke || !mono_method_signature (invoke))
11662 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11664 target_ins = sp [-1];
11666 if (mono_security_core_clr_enabled ())
11667 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11669 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11670 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11671 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11673 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11677 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11678 /* FIXME: SGEN support */
11679 if (invoke_context_used == 0) {
11681 if (cfg->verbose_level > 3)
11682 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11684 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11685 CHECK_CFG_EXCEPTION;
11694 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11695 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11699 inline_costs += 10 * num_calls++;
11702 case CEE_LDVIRTFTN: {
11703 MonoInst *args [2];
11707 n = read32 (ip + 2);
11708 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11709 if (!cmethod || mono_loader_get_last_error ())
11711 mono_class_init (cmethod->klass);
11713 context_used = mini_method_check_context_used (cfg, cmethod);
11715 if (mono_security_cas_enabled ()) {
11716 if (check_linkdemand (cfg, method, cmethod))
11717 INLINE_FAILURE ("linkdemand");
11718 CHECK_CFG_EXCEPTION;
11719 } else if (mono_security_core_clr_enabled ()) {
11720 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11726 args [1] = emit_get_rgctx_method (cfg, context_used,
11727 cmethod, MONO_RGCTX_INFO_METHOD);
11730 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11732 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11735 inline_costs += 10 * num_calls++;
11739 CHECK_STACK_OVF (1);
11741 n = read16 (ip + 2);
11743 EMIT_NEW_ARGLOAD (cfg, ins, n);
11748 CHECK_STACK_OVF (1);
11750 n = read16 (ip + 2);
11752 NEW_ARGLOADA (cfg, ins, n);
11753 MONO_ADD_INS (cfg->cbb, ins);
11761 n = read16 (ip + 2);
11763 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11765 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11769 CHECK_STACK_OVF (1);
11771 n = read16 (ip + 2);
11773 EMIT_NEW_LOCLOAD (cfg, ins, n);
11778 unsigned char *tmp_ip;
11779 CHECK_STACK_OVF (1);
11781 n = read16 (ip + 2);
11784 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11790 EMIT_NEW_LOCLOADA (cfg, ins, n);
11799 n = read16 (ip + 2);
11801 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11803 emit_stloc_ir (cfg, sp, header, n);
11810 if (sp != stack_start)
11812 if (cfg->method != method)
11814 * Inlining this into a loop in a parent could lead to
11815 * stack overflows which is different behavior than the
11816 * non-inlined case, thus disable inlining in this case.
11818 goto inline_failure;
11820 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11821 ins->dreg = alloc_preg (cfg);
11822 ins->sreg1 = sp [0]->dreg;
11823 ins->type = STACK_PTR;
11824 MONO_ADD_INS (cfg->cbb, ins);
11826 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11828 ins->flags |= MONO_INST_INIT;
11833 case CEE_ENDFILTER: {
11834 MonoExceptionClause *clause, *nearest;
11835 int cc, nearest_num;
11839 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11841 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11842 ins->sreg1 = (*sp)->dreg;
11843 MONO_ADD_INS (bblock, ins);
11844 start_new_bblock = 1;
11849 for (cc = 0; cc < header->num_clauses; ++cc) {
11850 clause = &header->clauses [cc];
11851 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11852 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11853 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11858 g_assert (nearest);
11859 if ((ip - header->code) != nearest->handler_offset)
11864 case CEE_UNALIGNED_:
11865 ins_flag |= MONO_INST_UNALIGNED;
11866 /* FIXME: record alignment? we can assume 1 for now */
11870 case CEE_VOLATILE_:
11871 ins_flag |= MONO_INST_VOLATILE;
11875 ins_flag |= MONO_INST_TAILCALL;
11876 cfg->flags |= MONO_CFG_HAS_TAIL;
11877 /* Can't inline tail calls at this time */
11878 inline_costs += 100000;
11885 token = read32 (ip + 2);
11886 klass = mini_get_class (method, token, generic_context);
11887 CHECK_TYPELOAD (klass);
11888 if (generic_class_is_reference_type (cfg, klass))
11889 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11891 mini_emit_initobj (cfg, *sp, NULL, klass);
11895 case CEE_CONSTRAINED_:
11897 token = read32 (ip + 2);
11898 constrained_call = mini_get_class (method, token, generic_context);
11899 CHECK_TYPELOAD (constrained_call);
11903 case CEE_INITBLK: {
11904 MonoInst *iargs [3];
11908 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11909 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11910 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11911 /* emit_memset only works when val == 0 */
11912 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11914 iargs [0] = sp [0];
11915 iargs [1] = sp [1];
11916 iargs [2] = sp [2];
11917 if (ip [1] == CEE_CPBLK) {
11918 MonoMethod *memcpy_method = get_memcpy_method ();
11919 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11921 MonoMethod *memset_method = get_memset_method ();
11922 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11932 ins_flag |= MONO_INST_NOTYPECHECK;
11934 ins_flag |= MONO_INST_NORANGECHECK;
11935 /* we ignore the no-nullcheck for now since we
11936 * really do it explicitly only when doing callvirt->call
11940 case CEE_RETHROW: {
11942 int handler_offset = -1;
11944 for (i = 0; i < header->num_clauses; ++i) {
11945 MonoExceptionClause *clause = &header->clauses [i];
11946 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11947 handler_offset = clause->handler_offset;
11952 bblock->flags |= BB_EXCEPTION_UNSAFE;
11954 g_assert (handler_offset != -1);
11956 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11957 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11958 ins->sreg1 = load->dreg;
11959 MONO_ADD_INS (bblock, ins);
11961 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11962 MONO_ADD_INS (bblock, ins);
11965 link_bblock (cfg, bblock, end_bblock);
11966 start_new_bblock = 1;
11974 GSHAREDVT_FAILURE (*ip);
11976 CHECK_STACK_OVF (1);
11978 token = read32 (ip + 2);
11979 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11980 MonoType *type = mono_type_create_from_typespec (image, token);
11981 val = mono_type_size (type, &ialign);
11983 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11984 CHECK_TYPELOAD (klass);
11985 mono_class_init (klass);
11986 val = mono_type_size (&klass->byval_arg, &ialign);
11988 EMIT_NEW_ICONST (cfg, ins, val);
11993 case CEE_REFANYTYPE: {
11994 MonoInst *src_var, *src;
11996 GSHAREDVT_FAILURE (*ip);
12002 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12004 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12005 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12006 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
12011 case CEE_READONLY_:
12024 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12034 g_warning ("opcode 0x%02x not handled", *ip);
12038 if (start_new_bblock != 1)
12041 bblock->cil_length = ip - bblock->cil_code;
12042 if (bblock->next_bb) {
12043 /* This could already be set because of inlining, #693905 */
12044 MonoBasicBlock *bb = bblock;
12046 while (bb->next_bb)
12048 bb->next_bb = end_bblock;
12050 bblock->next_bb = end_bblock;
12053 if (cfg->method == method && cfg->domainvar) {
12055 MonoInst *get_domain;
12057 cfg->cbb = init_localsbb;
12059 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12060 MONO_ADD_INS (cfg->cbb, get_domain);
12062 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12064 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12065 MONO_ADD_INS (cfg->cbb, store);
12068 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12069 if (cfg->compile_aot)
12070 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12071 mono_get_got_var (cfg);
12074 if (cfg->method == method && cfg->got_var)
12075 mono_emit_load_got_addr (cfg);
12077 if (init_localsbb) {
12078 cfg->cbb = init_localsbb;
12080 for (i = 0; i < header->num_locals; ++i) {
12081 emit_init_local (cfg, i, header->locals [i], init_locals);
12085 if (cfg->init_ref_vars && cfg->method == method) {
12086 /* Emit initialization for ref vars */
12087 // FIXME: Avoid duplication initialization for IL locals.
12088 for (i = 0; i < cfg->num_varinfo; ++i) {
12089 MonoInst *ins = cfg->varinfo [i];
12091 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12092 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12096 if (cfg->lmf_var && cfg->method == method) {
12097 cfg->cbb = init_localsbb;
12098 emit_push_lmf (cfg);
12102 MonoBasicBlock *bb;
12105 * Make seq points at backward branch targets interruptable.
12107 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12108 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12109 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12112 /* Add a sequence point for method entry/exit events */
12114 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12115 MONO_ADD_INS (init_localsbb, ins);
12116 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12117 MONO_ADD_INS (cfg->bb_exit, ins);
12121 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12122 * the code they refer to was dead (#11880).
12124 if (sym_seq_points) {
12125 for (i = 0; i < header->code_size; ++i) {
12126 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12129 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12130 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12137 if (cfg->method == method) {
12138 MonoBasicBlock *bb;
12139 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12140 bb->region = mono_find_block_region (cfg, bb->real_offset);
12142 mono_create_spvar_for_region (cfg, bb->region);
12143 if (cfg->verbose_level > 2)
12144 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12148 g_slist_free (class_inits);
12149 dont_inline = g_list_remove (dont_inline, method);
12151 if (inline_costs < 0) {
12154 /* Method is too large */
12155 mname = mono_method_full_name (method, TRUE);
12156 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12157 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12159 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12160 mono_basic_block_free (original_bb);
12164 if ((cfg->verbose_level > 2) && (cfg->method == method))
12165 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12167 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12168 mono_basic_block_free (original_bb);
12169 return inline_costs;
12172 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12179 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12183 set_exception_type_from_invalid_il (cfg, method, ip);
12187 g_slist_free (class_inits);
12188 mono_basic_block_free (original_bb);
12189 dont_inline = g_list_remove (dont_inline, method);
12190 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source membase store opcode to its immediate-source
 * counterpart (OP_STORE*_MEMBASE_REG -> OP_STORE*_MEMBASE_IMM), used when a
 * store's source register is known to hold a constant. Any opcode without an
 * immediate form trips the assertion below.
 */
12195 store_membase_reg_to_store_membase_imm (int opcode)
12198 case OP_STORE_MEMBASE_REG:
12199 return OP_STORE_MEMBASE_IMM;
12200 case OP_STOREI1_MEMBASE_REG:
12201 return OP_STOREI1_MEMBASE_IMM;
12202 case OP_STOREI2_MEMBASE_REG:
12203 return OP_STOREI2_MEMBASE_IMM;
12204 case OP_STOREI4_MEMBASE_REG:
12205 return OP_STOREI4_MEMBASE_IMM;
12206 case OP_STOREI8_MEMBASE_REG:
12207 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for this store opcode. */
12209 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant whose second
 * operand is an immediate (e.g. OP_IADD -> OP_IADD_IMM), so constant operands
 * can be folded directly into the instruction.
 * NOTE(review): the case labels are elided in this view; each return below
 * corresponds to the register-form opcode implied by its name.
 */
12216 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU/shift ops */
12220 return OP_IADD_IMM;
12222 return OP_ISUB_IMM;
12224 return OP_IDIV_IMM;
12226 return OP_IDIV_UN_IMM;
12228 return OP_IREM_IMM;
12230 return OP_IREM_UN_IMM;
12232 return OP_IMUL_IMM;
12234 return OP_IAND_IMM;
12238 return OP_IXOR_IMM;
12240 return OP_ISHL_IMM;
12242 return OP_ISHR_IMM;
12244 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU/shift ops */
12247 return OP_LADD_IMM;
12249 return OP_LSUB_IMM;
12251 return OP_LAND_IMM;
12255 return OP_LXOR_IMM;
12257 return OP_LSHL_IMM;
12259 return OP_LSHR_IMM;
12261 return OP_LSHR_UN_IMM;
/* Compares */
12264 return OP_COMPARE_IMM;
12266 return OP_ICOMPARE_IMM;
12268 return OP_LCOMPARE_IMM;
/* Membase stores: immediate replaces the source register */
12270 case OP_STORE_MEMBASE_REG:
12271 return OP_STORE_MEMBASE_IMM;
12272 case OP_STOREI1_MEMBASE_REG:
12273 return OP_STOREI1_MEMBASE_IMM;
12274 case OP_STOREI2_MEMBASE_REG:
12275 return OP_STOREI2_MEMBASE_IMM;
12276 case OP_STOREI4_MEMBASE_REG:
12277 return OP_STOREI4_MEMBASE_IMM;
/* Architecture specific opcodes which also have immediate forms */
12279 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12281 return OP_X86_PUSH_IMM;
12282 case OP_X86_COMPARE_MEMBASE_REG:
12283 return OP_X86_COMPARE_MEMBASE_IMM;
12285 #if defined(TARGET_AMD64)
12286 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12287 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a constant register target becomes a direct call. */
12289 case OP_VOIDCALL_REG:
12290 return OP_VOIDCALL;
12298 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Translate a CIL indirect-load opcode (CEE_LDIND_*) into the
 * corresponding linear-IR load-from-base+offset opcode (OP_LOAD*_MEMBASE).
 * Asserts on unknown opcodes.
 */
12305 ldind_to_load_membase (int opcode)
12309 return OP_LOADI1_MEMBASE;
12311 return OP_LOADU1_MEMBASE;
12313 return OP_LOADI2_MEMBASE;
12315 return OP_LOADU2_MEMBASE;
12317 return OP_LOADI4_MEMBASE;
12319 return OP_LOADU4_MEMBASE;
/* Native-int sized load */
12321 return OP_LOAD_MEMBASE;
/* Object references use the pointer-sized load as well */
12322 case CEE_LDIND_REF:
12323 return OP_LOAD_MEMBASE;
12325 return OP_LOADI8_MEMBASE;
12327 return OP_LOADR4_MEMBASE;
12329 return OP_LOADR8_MEMBASE;
12331 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Translate a CIL indirect-store opcode (CEE_STIND_*) into the
 * corresponding linear-IR store-to-base+offset opcode
 * (OP_STORE*_MEMBASE_REG). Asserts on unknown opcodes.
 */
12338 stind_to_store_membase (int opcode)
12342 return OP_STOREI1_MEMBASE_REG;
12344 return OP_STOREI2_MEMBASE_REG;
12346 return OP_STOREI4_MEMBASE_REG;
/* Object references use the pointer-sized store */
12348 case CEE_STIND_REF:
12349 return OP_STORE_MEMBASE_REG;
12351 return OP_STOREI8_MEMBASE_REG;
12353 return OP_STORER4_MEMBASE_REG;
12355 return OP_STORER8_MEMBASE_REG;
12357 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address form
 * (OP_LOAD*_MEM), for targets which support loads from a constant address.
 * Currently only enabled on x86/amd64; 8 byte loads additionally require a
 * 64 bit register size.
 */
12364 mono_load_membase_to_load_mem (int opcode)
12366 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12367 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12369 case OP_LOAD_MEMBASE:
12370 return OP_LOAD_MEM;
12371 case OP_LOADU1_MEMBASE:
12372 return OP_LOADU1_MEM;
12373 case OP_LOADU2_MEMBASE:
12374 return OP_LOADU2_MEM;
12375 case OP_LOADI4_MEMBASE:
12376 return OP_LOADI4_MEM;
12377 case OP_LOADU4_MEMBASE:
12378 return OP_LOADU4_MEM;
12379 #if SIZEOF_REGISTER == 8
12380 case OP_LOADI8_MEMBASE:
12381 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored by STORE_OPCODE,
 * return the x86/amd64 read-modify-write form which operates directly on the
 * memory destination (e.g. OP_IADD + store -> OP_X86_ADD_MEMBASE_REG),
 * eliminating the intermediate register. Only pointer/int sized stores are
 * eligible; the guard below rejects everything else.
 */
12390 op_to_op_dest_membase (int store_opcode, int opcode)
12392 #if defined(TARGET_X86)
/* On x86 only 32 bit (pointer sized) stores can be fused. */
12393 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* Register-source RMW forms */
12398 return OP_X86_ADD_MEMBASE_REG;
12400 return OP_X86_SUB_MEMBASE_REG;
12402 return OP_X86_AND_MEMBASE_REG;
12404 return OP_X86_OR_MEMBASE_REG;
12406 return OP_X86_XOR_MEMBASE_REG;
/* Immediate-source RMW forms */
12409 return OP_X86_ADD_MEMBASE_IMM;
12412 return OP_X86_SUB_MEMBASE_IMM;
12415 return OP_X86_AND_MEMBASE_IMM;
12418 return OP_X86_OR_MEMBASE_IMM;
12421 return OP_X86_XOR_MEMBASE_IMM;
12427 #if defined(TARGET_AMD64)
/* amd64 can also fuse 8 byte stores. */
12428 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit RMW forms (shared with x86 opcode names) */
12433 return OP_X86_ADD_MEMBASE_REG;
12435 return OP_X86_SUB_MEMBASE_REG;
12437 return OP_X86_AND_MEMBASE_REG;
12439 return OP_X86_OR_MEMBASE_REG;
12441 return OP_X86_XOR_MEMBASE_REG;
12443 return OP_X86_ADD_MEMBASE_IMM;
12445 return OP_X86_SUB_MEMBASE_IMM;
12447 return OP_X86_AND_MEMBASE_IMM;
12449 return OP_X86_OR_MEMBASE_IMM;
12451 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit RMW forms */
12453 return OP_AMD64_ADD_MEMBASE_REG;
12455 return OP_AMD64_SUB_MEMBASE_REG;
12457 return OP_AMD64_AND_MEMBASE_REG;
12459 return OP_AMD64_OR_MEMBASE_REG;
12461 return OP_AMD64_XOR_MEMBASE_REG;
12464 return OP_AMD64_ADD_MEMBASE_IMM;
12467 return OP_AMD64_SUB_MEMBASE_IMM;
12470 return OP_AMD64_AND_MEMBASE_IMM;
12473 return OP_AMD64_OR_MEMBASE_IMM;
12476 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a setcc-style opcode with the byte store that consumes it into a
 * single x86/amd64 SETcc-to-memory instruction. Only byte stores
 * (OP_STOREI1_MEMBASE_REG) qualify, since SETcc writes one byte.
 */
12486 op_to_op_store_membase (int store_opcode, int opcode)
12488 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12491 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12492 return OP_X86_SETEQ_MEMBASE;
12494 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12495 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a preceding load (LOAD_OPCODE) into the first source operand of
 * OPCODE, producing an x86/amd64 instruction which reads that operand
 * straight from memory (e.g. compare-with-membase, push-membase). Returns a
 * fused opcode only for compatible load sizes; byte loads are special-cased
 * for 8 bit immediate compares.
 */
12503 op_to_op_src1_membase (int load_opcode, int opcode)
12506 /* FIXME: This has sign extension issues */
/* Byte compare against an immediate can use the 8 bit memory form. */
12508 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12509 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only 32 bit/pointer sized loads can be folded on x86. */
12512 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12517 return OP_X86_PUSH_MEMBASE;
12518 case OP_COMPARE_IMM:
12519 case OP_ICOMPARE_IMM:
12520 return OP_X86_COMPARE_MEMBASE_IMM;
12523 return OP_X86_COMPARE_MEMBASE_REG;
12527 #ifdef TARGET_AMD64
12528 /* FIXME: This has sign extension issues */
12530 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12531 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 pointers are 4 bytes, so 8 byte loads are handled apart. */
12536 #ifdef __mono_ilp32__
12537 if (load_opcode == OP_LOADI8_MEMBASE)
12539 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12541 return OP_X86_PUSH_MEMBASE;
/* Disabled: membase-immediate compares only encode 32 bit immediates. */
12543 /* FIXME: This only works for 32 bit immediates
12544 case OP_COMPARE_IMM:
12545 case OP_LCOMPARE_IMM:
12546 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12547 return OP_AMD64_COMPARE_MEMBASE_IMM;
12549 case OP_ICOMPARE_IMM:
12550 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12551 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12555 #ifdef __mono_ilp32__
12556 if (load_opcode == OP_LOAD_MEMBASE)
12557 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12558 if (load_opcode == OP_LOADI8_MEMBASE)
12560 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12562 return OP_AMD64_COMPARE_MEMBASE_REG;
12565 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12566 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a preceding load (LOAD_OPCODE) into the second source operand of
 * OPCODE, producing an x86/amd64 reg-op-membase instruction
 * (e.g. OP_IADD -> OP_X86_ADD_REG_MEMBASE). Fusion is only done when the
 * load size matches the operation width.
 */
12575 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only 32 bit/pointer sized loads are eligible. */
12578 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12584 return OP_X86_COMPARE_REG_MEMBASE;
12586 return OP_X86_ADD_REG_MEMBASE;
12588 return OP_X86_SUB_REG_MEMBASE;
12590 return OP_X86_AND_REG_MEMBASE;
12592 return OP_X86_OR_REG_MEMBASE;
12594 return OP_X86_XOR_REG_MEMBASE;
12598 #ifdef TARGET_AMD64
/* ILP32: pointer sized loads are 4 bytes, grouped with the 32 bit cases. */
12599 #ifdef __mono_ilp32__
12600 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12602 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit fused forms */
12606 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12608 return OP_X86_ADD_REG_MEMBASE;
12610 return OP_X86_SUB_REG_MEMBASE;
12612 return OP_X86_AND_REG_MEMBASE;
12614 return OP_X86_OR_REG_MEMBASE;
12616 return OP_X86_XOR_REG_MEMBASE;
12618 #ifdef __mono_ilp32__
12619 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12621 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit fused forms */
12626 return OP_AMD64_COMPARE_REG_MEMBASE;
12628 return OP_AMD64_ADD_REG_MEMBASE;
12630 return OP_AMD64_SUB_REG_MEMBASE;
12632 return OP_AMD64_AND_REG_MEMBASE;
12634 return OP_AMD64_OR_REG_MEMBASE;
12636 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes which the
 * target architecture emulates in software (long shifts on 32 bit targets
 * without native support, and mul/div when MONO_ARCH_EMULATE_* is defined),
 * since the emulation paths have no immediate variants.
 */
12645 mono_op_to_op_imm_noemul (int opcode)
12648 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12654 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12661 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else falls through to the generic mapping. */
12666 return mono_op_to_op_imm (opcode);
12671 * mono_handle_global_vregs:
12673 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12677 mono_handle_global_vregs (MonoCompile *cfg)
12679 gint32 *vreg_to_bb;
12680 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] tracks usage: 0 = unseen, block_num + 1 = seen in one
 * bblock, -1 = seen in multiple bblocks.
 * NOTE(review): the allocation below uses sizeof (gint32*) (pointer size,
 * over-allocating on 64 bit) and adds 1 outside the multiplication —
 * presumably sizeof (gint32) * (cfg->next_vreg + 1) was intended; confirm.
 */
12683 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12685 #ifdef MONO_ARCH_SIMD_INTRINSICS
12686 if (cfg->uses_simd_intrinsics)
12687 mono_simd_simplify_indirection (cfg);
12690 /* Find local vregs used in more than one bb */
12691 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12692 MonoInst *ins = bb->code;
12693 int block_num = bb->block_num;
12695 if (cfg->verbose_level > 2)
12696 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12699 for (; ins; ins = ins->next) {
12700 const char *spec = INS_INFO (ins->opcode);
12701 int regtype = 0, regindex;
12704 if (G_UNLIKELY (cfg->verbose_level > 2))
12705 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR. */
12707 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (0), sreg1 (1), sreg2 (2) and sreg3 (3) of the instruction. */
12709 for (regindex = 0; regindex < 4; regindex ++) {
12712 if (regindex == 0) {
12713 regtype = spec [MONO_INST_DEST];
12714 if (regtype == ' ')
12717 } else if (regindex == 1) {
12718 regtype = spec [MONO_INST_SRC1];
12719 if (regtype == ' ')
12722 } else if (regindex == 2) {
12723 regtype = spec [MONO_INST_SRC2];
12724 if (regtype == ' ')
12727 } else if (regindex == 3) {
12728 regtype = spec [MONO_INST_SRC3];
12729 if (regtype == ' ')
12734 #if SIZEOF_REGISTER == 4
12735 /* In the LLVM case, the long opcodes are not decomposed */
12736 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12738 * Since some instructions reference the original long vreg,
12739 * and some reference the two component vregs, it is quite hard
12740 * to determine when it needs to be global. So be conservative.
12742 if (!get_vreg_to_inst (cfg, vreg)) {
12743 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12745 if (cfg->verbose_level > 2)
12746 printf ("LONG VREG R%d made global.\n", vreg);
12750 * Make the component vregs volatile since the optimizations can
12751 * get confused otherwise.
12753 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12754 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12758 g_assert (vreg != -1);
12760 prev_bb = vreg_to_bb [vreg];
12761 if (prev_bb == 0) {
12762 /* 0 is a valid block num */
12763 vreg_to_bb [vreg] = block_num + 1;
12764 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables. */
12765 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12768 if (!get_vreg_to_inst (cfg, vreg)) {
12769 if (G_UNLIKELY (cfg->verbose_level > 2))
12770 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the appropriate type for the vreg. */
12774 if (vreg_is_ref (cfg, vreg))
12775 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12777 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12780 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12783 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12786 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12789 g_assert_not_reached ();
12793 /* Flag as having been used in more than one bb */
12794 vreg_to_bb [vreg] = -1;
12800 /* If a variable is used in only one bblock, convert it into a local vreg */
12801 for (i = 0; i < cfg->num_varinfo; i++) {
12802 MonoInst *var = cfg->varinfo [i];
12803 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12805 switch (var->type) {
12811 #if SIZEOF_REGISTER == 8
12814 #if !defined(TARGET_X86)
12815 /* Enabling this screws up the fp stack on x86 */
12818 if (mono_arch_is_soft_float ())
12821 /* Arguments are implicitly global */
12822 /* Putting R4 vars into registers doesn't work currently */
12823 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12824 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12826 * Make that the variable's liveness interval doesn't contain a call, since
12827 * that would cause the lvreg to be spilled, making the whole optimization
12830 /* This is too slow for JIT compilation */
12832 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12834 int def_index, call_index, ins_index;
12835 gboolean spilled = FALSE;
12840 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12841 const char *spec = INS_INFO (ins->opcode);
12843 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12844 def_index = ins_index;
/*
 * NOTE(review): both legs of the condition below test SRC1/sreg1 — the
 * second leg looks like it was meant to test MONO_INST_SRC2/ins->sreg2,
 * so uses via the second source operand are presumably missed; confirm
 * and fix in a dedicated change.
 */
12846 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12847 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12848 if (call_index > def_index) {
12854 if (MONO_IS_CALL (ins))
12855 call_index = ins_index;
12865 if (G_UNLIKELY (cfg->verbose_level > 2))
12866 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: kill the variable and detach it from its vreg. */
12867 var->flags |= MONO_INST_IS_DEAD;
12868 cfg->vreg_to_inst [var->dreg] = NULL;
12875 * Compress the varinfo and vars tables so the liveness computation is faster and
12876 * takes up less space.
12879 for (i = 0; i < cfg->num_varinfo; ++i) {
12880 MonoInst *var = cfg->varinfo [i];
12881 if (pos < i && cfg->locals_start == i)
12882 cfg->locals_start = pos;
12883 if (!(var->flags & MONO_INST_IS_DEAD)) {
12885 cfg->varinfo [pos] = cfg->varinfo [i];
12886 cfg->varinfo [pos]->inst_c0 = pos;
12887 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12888 cfg->vars [pos].idx = pos;
12889 #if SIZEOF_REGISTER == 4
12890 if (cfg->varinfo [pos]->type == STACK_I8) {
12891 /* Modify the two component vars too */
12894 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12895 var1->inst_c0 = pos;
12896 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12897 var1->inst_c0 = pos;
12904 cfg->num_varinfo = pos;
12905 if (cfg->locals_start > cfg->num_varinfo)
12906 cfg->locals_start = cfg->num_varinfo;
/* NOTE(review): elided listing -- each line keeps its original file line number
   and intermediate lines are missing. Code below is byte-identical to the
   original; only comments were added, hedged where the missing context matters. */
12910 * mono_spill_global_vars:
12912 * Generate spill code for variables which are not allocated to registers,
12913 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12914 * code is generated which could be optimized by the local optimization passes.
12917 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12919 MonoBasicBlock *bb;
12921 int orig_next_vreg;
12922 guint32 *vreg_to_lvreg;
12924 guint32 i, lvregs_len;
12925 gboolean dest_has_lvreg = FALSE;
12926 guint32 stacktypes [128];
12927 MonoInst **live_range_start, **live_range_end;
12928 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12929 int *gsharedvt_vreg_to_idx = NULL;
12931 *need_local_opts = FALSE;
12933 memset (spec2, 0, sizeof (spec2));
12935 /* FIXME: Move this function to mini.c */
/* Map ins-spec regtype characters ('i'/'l'/'f'/'x') to stack types, used by
   the alloc_dreg () calls further down when a fresh lvreg is needed. */
12936 stacktypes ['i'] = STACK_PTR;
12937 stacktypes ['l'] = STACK_I8;
12938 stacktypes ['f'] = STACK_R8;
12939 #ifdef MONO_ARCH_SIMD_INTRINSICS
12940 stacktypes ['x'] = STACK_VTYPE;
/* On 32-bit targets, give every live stack-allocated long two component
   MonoInsts: dreg+1 -> LS word, dreg+2 -> MS word of the same stack slot. */
12943 #if SIZEOF_REGISTER == 4
12944 /* Create MonoInsts for longs */
12945 for (i = 0; i < cfg->num_varinfo; i++) {
12946 MonoInst *ins = cfg->varinfo [i];
12948 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12949 switch (ins->type) {
12954 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12957 g_assert (ins->opcode == OP_REGOFFSET);
12959 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12961 tree->opcode = OP_REGOFFSET;
12962 tree->inst_basereg = ins->inst_basereg;
12963 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12965 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12967 tree->opcode = OP_REGOFFSET;
12968 tree->inst_basereg = ins->inst_basereg;
12969 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
/* GC maps need liveness info for vars kept in registers too, so tag them. */
12979 if (cfg->compute_gc_maps) {
12980 /* registers need liveness info even for !non refs */
12981 for (i = 0; i < cfg->num_varinfo; i++) {
12982 MonoInst *ins = cfg->varinfo [i];
12984 if (ins->opcode == OP_REGVAR)
12985 ins->flags |= MONO_INST_GC_TRACK;
/* gsharedvt: record, per vreg, how to find a variable-sized value:
   slot index + 1 for locals (offset looked up at runtime), -1 for
   by-ref arguments. 0 (from alloc0) means "not a gsharedvt var". */
12989 if (cfg->gsharedvt) {
12990 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
12992 for (i = 0; i < cfg->num_varinfo; ++i) {
12993 MonoInst *ins = cfg->varinfo [i];
12996 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12997 if (i >= cfg->locals_start) {
12999 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13000 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13001 ins->opcode = OP_GSHAREDVT_LOCAL;
13002 ins->inst_imm = idx;
13005 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13006 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13012 /* FIXME: widening and truncation */
13015 * As an optimization, when a variable allocated to the stack is first loaded into
13016 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13017 * the variable again.
13019 orig_next_vreg = cfg->next_vreg;
13020 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* NOTE(review): lvregs has a fixed capacity of 1024 entries; overflow is
   guarded only by the g_assert (lvregs_len < 1024) checks below. */
13021 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13025 * These arrays contain the first and last instructions accessing a given
13027 * Since we emit bblocks in the same order we process them here, and we
13028 * don't split live ranges, these will precisely describe the live range of
13029 * the variable, i.e. the instruction range where a valid value can be found
13030 * in the variables location.
13031 * The live range is computed using the liveness info computed by the liveness pass.
13032 * We can't use vmv->range, since that is an abstract live range, and we need
13033 * one which is instruction precise.
13034 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13036 /* FIXME: Only do this if debugging info is requested */
13037 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13038 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13039 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13040 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13042 /* Add spill loads/stores */
13043 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13046 if (cfg->verbose_level > 2)
13047 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are per-bblock; reset the map at every bblock boundary.
   NOTE(review): lvregs_len is presumably reset to 0 in an elided line
   right after this loop -- verify against the full file. */
13049 /* Clear vreg_to_lvreg array */
13050 for (i = 0; i < lvregs_len; i++)
13051 vreg_to_lvreg [lvregs [i]] = 0;
13055 MONO_BB_FOR_EACH_INS (bb, ins) {
13056 const char *spec = INS_INFO (ins->opcode);
13057 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13058 gboolean store, no_lvreg;
13059 int sregs [MONO_MAX_SRC_REGS];
13061 if (G_UNLIKELY (cfg->verbose_level > 2))
13062 mono_print_ins (ins);
13064 if (ins->opcode == OP_NOP)
13068 * We handle LDADDR here as well, since it can only be decomposed
13069 * when variable addresses are known.
13071 if (ins->opcode == OP_LDADDR) {
13072 MonoInst *var = ins->inst_p0;
13074 if (var->opcode == OP_VTARG_ADDR) {
13075 /* Happens on SPARC/S390 where vtypes are passed by reference */
13076 MonoInst *vtaddr = var->inst_left;
13077 if (vtaddr->opcode == OP_REGVAR) {
13078 ins->opcode = OP_MOVE;
13079 ins->sreg1 = vtaddr->dreg;
13081 else if (var->inst_left->opcode == OP_REGOFFSET) {
13082 ins->opcode = OP_LOAD_MEMBASE;
13083 ins->inst_basereg = vtaddr->inst_basereg;
13084 ins->inst_offset = vtaddr->inst_offset;
13087 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13088 /* gsharedvt arg passed by ref */
13089 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13091 ins->opcode = OP_LOAD_MEMBASE;
13092 ins->inst_basereg = var->inst_basereg;
13093 ins->inst_offset = var->inst_offset;
13094 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13095 MonoInst *load, *load2, *load3;
13096 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13097 int reg1, reg2, reg3;
13098 MonoInst *info_var = cfg->gsharedvt_info_var;
13099 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13103 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13106 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13108 g_assert (info_var);
13109 g_assert (locals_var);
13111 /* Mark the instruction used to compute the locals var as used */
13112 cfg->gsharedvt_locals_var_ins = NULL;
13114 /* Load the offset */
13115 if (info_var->opcode == OP_REGOFFSET) {
13116 reg1 = alloc_ireg (cfg);
13117 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13118 } else if (info_var->opcode == OP_REGVAR) {
13120 reg1 = info_var->dreg;
13122 g_assert_not_reached ();
/* reg2 = info->entries [idx] (the local's offset inside the locals area). */
13124 reg2 = alloc_ireg (cfg);
13125 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13126 /* Load the locals area address */
13127 reg3 = alloc_ireg (cfg);
13128 if (locals_var->opcode == OP_REGOFFSET) {
13129 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13130 } else if (locals_var->opcode == OP_REGVAR) {
13131 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13133 g_assert_not_reached ();
13135 /* Compute the address */
13136 ins->opcode = OP_PADD;
/* Insert the three loads before the PADD, in reverse order so they
   end up executing load, load2, load3, then ins. */
13140 mono_bblock_insert_before_ins (bb, ins, load3);
13141 mono_bblock_insert_before_ins (bb, load3, load2);
13143 mono_bblock_insert_before_ins (bb, load2, load);
/* Plain stack var: LDADDR becomes basereg + offset. */
13145 g_assert (var->opcode == OP_REGOFFSET);
13147 ins->opcode = OP_ADD_IMM;
13148 ins->sreg1 = var->inst_basereg;
13149 ins->inst_imm = var->inst_offset;
13152 *need_local_opts = TRUE;
13153 spec = INS_INFO (ins->opcode);
/* High-level (CEE) opcodes must all be lowered before this pass. */
13156 if (ins->opcode < MONO_CEE_LAST) {
13157 mono_print_ins (ins);
13158 g_assert_not_reached ();
13162 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg<->sreg2 so the store's base register is processed
   as a use rather than a def; spec2 mirrors the swapped spec. The swap is
   undone near the end of the loop body (see the matching swap below). */
13166 if (MONO_IS_STORE_MEMBASE (ins)) {
13167 tmp_reg = ins->dreg;
13168 ins->dreg = ins->sreg2;
13169 ins->sreg2 = tmp_reg;
13172 spec2 [MONO_INST_DEST] = ' ';
13173 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13174 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13175 spec2 [MONO_INST_SRC3] = ' ';
13177 } else if (MONO_IS_STORE_MEMINDEX (ins))
13178 g_assert_not_reached ();
13183 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13184 printf ("\t %.3s %d", spec, ins->dreg);
13185 num_sregs = mono_inst_get_src_registers (ins, sregs);
13186 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13187 printf (" %d", sregs [srcindex]);
/* --- Destination: if dreg names a global var, either rename it to the
   allocated hreg or redirect it to a fresh lvreg + spill store. --- */
13194 regtype = spec [MONO_INST_DEST];
13195 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13198 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13199 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13200 MonoInst *store_ins;
13202 MonoInst *def_ins = ins;
13203 int dreg = ins->dreg; /* The original vreg */
13205 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13207 if (var->opcode == OP_REGVAR) {
13208 ins->dreg = var->dreg;
13209 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13211 * Instead of emitting a load+store, use a _membase opcode.
13213 g_assert (var->opcode == OP_REGOFFSET);
13214 if (ins->opcode == OP_MOVE) {
13218 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13219 ins->inst_basereg = var->inst_basereg;
13220 ins->inst_offset = var->inst_offset;
13223 spec = INS_INFO (ins->opcode);
13227 g_assert (var->opcode == OP_REGOFFSET);
13229 prev_dreg = ins->dreg;
13231 /* Invalidate any previous lvreg for this vreg */
13232 vreg_to_lvreg [ins->dreg] = 0;
/* Soft-float: R8 values live in integer register pairs, store as I8. */
13236 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13238 store_opcode = OP_STOREI8_MEMBASE_REG;
13241 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
/* 32-bit longs: spill each word separately (component regs dreg+1/+2). */
13243 #if SIZEOF_REGISTER != 8
13244 if (regtype == 'l') {
13245 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13246 mono_bblock_insert_after_ins (bb, ins, store_ins);
13247 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13248 mono_bblock_insert_after_ins (bb, ins, store_ins);
13249 def_ins = store_ins;
13254 g_assert (store_opcode != OP_STOREV_MEMBASE);
13256 /* Try to fuse the store into the instruction itself */
13257 /* FIXME: Add more instructions */
13258 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13259 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13260 ins->inst_imm = ins->inst_c0;
13261 ins->inst_destbasereg = var->inst_basereg;
13262 ins->inst_offset = var->inst_offset;
13263 spec = INS_INFO (ins->opcode);
13264 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13265 ins->opcode = store_opcode;
13266 ins->inst_destbasereg = var->inst_basereg;
13267 ins->inst_offset = var->inst_offset;
/* The move just became a store: apply the same dreg/sreg2 swap +
   spec2 mirroring used for store opcodes above. */
13271 tmp_reg = ins->dreg;
13272 ins->dreg = ins->sreg2;
13273 ins->sreg2 = tmp_reg;
13276 spec2 [MONO_INST_DEST] = ' ';
13277 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13278 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13279 spec2 [MONO_INST_SRC3] = ' ';
13281 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13282 // FIXME: The backends expect the base reg to be in inst_basereg
13283 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13285 ins->inst_basereg = var->inst_basereg;
13286 ins->inst_offset = var->inst_offset;
13287 spec = INS_INFO (ins->opcode);
13289 /* printf ("INS: "); mono_print_ins (ins); */
13290 /* Create a store instruction */
13291 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13293 /* Insert it after the instruction */
13294 mono_bblock_insert_after_ins (bb, ins, store_ins);
13296 def_ins = store_ins;
13299 * We can't assign ins->dreg to var->dreg here, since the
13300 * sregs could use it. So set a flag, and do it after
13303 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13304 dest_has_lvreg = TRUE;
/* Record the first definition point for the precise live range. */
13309 if (def_ins && !live_range_start [dreg]) {
13310 live_range_start [dreg] = def_ins;
13311 live_range_start_bb [dreg] = bb;
13314 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13317 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13318 tmp->inst_c1 = dreg;
13319 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* --- Sources: same treatment as the destination, per source slot. ---
   NOTE(review): the loop iterates all 3 (MONO_MAX_SRC_REGS) slots rather
   than num_sregs; it relies on spec being ' ' / sreg being -1 for unused
   slots (enforced by the g_assert just below). */
13326 num_sregs = mono_inst_get_src_registers (ins, sregs);
13327 for (srcindex = 0; srcindex < 3; ++srcindex) {
13328 regtype = spec [MONO_INST_SRC1 + srcindex];
13329 sreg = sregs [srcindex];
13331 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13332 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13333 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13334 MonoInst *use_ins = ins;
13335 MonoInst *load_ins;
13336 guint32 load_opcode;
13338 if (var->opcode == OP_REGVAR) {
13339 sregs [srcindex] = var->dreg;
13340 //mono_inst_set_src_registers (ins, sregs);
13341 live_range_end [sreg] = use_ins;
13342 live_range_end_bb [sreg] = bb;
13344 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13347 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13348 /* var->dreg is a hreg */
13349 tmp->inst_c1 = sreg;
13350 mono_bblock_insert_after_ins (bb, ins, tmp);
13356 g_assert (var->opcode == OP_REGOFFSET);
13358 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13360 g_assert (load_opcode != OP_LOADV_MEMBASE);
13362 if (vreg_to_lvreg [sreg]) {
13363 g_assert (vreg_to_lvreg [sreg] != -1);
13365 /* The variable is already loaded to an lvreg */
13366 if (G_UNLIKELY (cfg->verbose_level > 2))
13367 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13368 sregs [srcindex] = vreg_to_lvreg [sreg];
13369 //mono_inst_set_src_registers (ins, sregs);
13373 /* Try to fuse the load into the instruction */
13374 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13375 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13376 sregs [0] = var->inst_basereg;
13377 //mono_inst_set_src_registers (ins, sregs);
13378 ins->inst_offset = var->inst_offset;
13379 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13380 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13381 sregs [1] = var->inst_basereg;
13382 //mono_inst_set_src_registers (ins, sregs);
13383 ins->inst_offset = var->inst_offset;
13385 if (MONO_IS_REAL_MOVE (ins)) {
13386 ins->opcode = OP_NOP;
13389 //printf ("%d ", srcindex); mono_print_ins (ins);
13391 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly loaded lvreg so later uses in this bblock can
   reuse it instead of reloading from the stack slot. */
13393 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13394 if (var->dreg == prev_dreg) {
13396 * sreg refers to the value loaded by the load
13397 * emitted below, but we need to use ins->dreg
13398 * since it refers to the store emitted earlier.
13402 g_assert (sreg != -1);
13403 vreg_to_lvreg [var->dreg] = sreg;
13404 g_assert (lvregs_len < 1024);
13405 lvregs [lvregs_len ++] = var->dreg;
13409 sregs [srcindex] = sreg;
13410 //mono_inst_set_src_registers (ins, sregs);
/* 32-bit longs: load each word separately into sreg+1 / sreg+2. */
13412 #if SIZEOF_REGISTER != 8
13413 if (regtype == 'l') {
13414 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13415 mono_bblock_insert_before_ins (bb, ins, load_ins);
13416 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13417 mono_bblock_insert_before_ins (bb, ins, load_ins);
13418 use_ins = load_ins;
13423 #if SIZEOF_REGISTER == 4
13424 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13426 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13427 mono_bblock_insert_before_ins (bb, ins, load_ins);
13428 use_ins = load_ins;
13432 if (var->dreg < orig_next_vreg) {
13433 live_range_end [var->dreg] = use_ins;
13434 live_range_end_bb [var->dreg] = bb;
13437 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13440 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13441 tmp->inst_c1 = var->dreg;
13442 mono_bblock_insert_after_ins (bb, ins, tmp);
13446 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg pass above: publish the store's lvreg only now
   that all sregs have been processed (see the comment near 13299). */
13448 if (dest_has_lvreg) {
13449 g_assert (ins->dreg != -1);
13450 vreg_to_lvreg [prev_dreg] = ins->dreg;
13451 g_assert (lvregs_len < 1024);
13452 lvregs [lvregs_len ++] = prev_dreg;
13453 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap applied to store opcodes at the top. */
13457 tmp_reg = ins->dreg;
13458 ins->dreg = ins->sreg2;
13459 ins->sreg2 = tmp_reg;
/* A call invalidates the cached lvregs (presumably they live in
   caller-clobbered registers -- verify), so drop the whole cache. */
13462 if (MONO_IS_CALL (ins)) {
13463 /* Clear vreg_to_lvreg array */
13464 for (i = 0; i < lvregs_len; i++)
13465 vreg_to_lvreg [lvregs [i]] = 0;
13467 } else if (ins->opcode == OP_NOP) {
13469 MONO_INST_NULLIFY_SREGS (ins);
13472 if (cfg->verbose_level > 2)
13473 mono_print_ins_index (1, ins);
13476 /* Extend the live range based on the liveness info */
13477 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13478 for (i = 0; i < cfg->num_varinfo; i ++) {
13479 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13481 if (vreg_is_volatile (cfg, vi->vreg))
13482 /* The liveness info is incomplete */
13485 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13486 /* Live from at least the first ins of this bb */
13487 live_range_start [vi->vreg] = bb->code;
13488 live_range_start_bb [vi->vreg] = bb;
13491 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13492 /* Live at least until the last ins of this bb */
13493 live_range_end [vi->vreg] = bb->last_ins;
13494 live_range_end_bb [vi->vreg] = bb;
13500 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13502 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13503 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13505 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13506 for (i = 0; i < cfg->num_varinfo; ++i) {
13507 int vreg = MONO_VARINFO (cfg, i)->vreg;
13510 if (live_range_start [vreg]) {
13511 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13513 ins->inst_c1 = vreg;
13514 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13516 if (live_range_end [vreg]) {
13517 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13519 ins->inst_c1 = vreg;
13520 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13521 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13523 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* gsharedvt_locals_var_ins still set here means no LDADDR consumed the
   locals-var computation above; neutralize it to a harmless constant. */
13529 if (cfg->gsharedvt_locals_var_ins) {
13530 /* Nullify if unused */
13531 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13532 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* Live-range arrays were g_new0'd (heap), unlike the mempool allocations
   above which are freed with the compile. */
13535 g_free (live_range_start);
13536 g_free (live_range_end);
13537 g_free (live_range_start_bb);
13538 g_free (live_range_end_bb);
13543 * - use 'iadd' instead of 'int_add'
13544 * - handling ovf opcodes: decompose in method_to_ir.
13545 * - unify iregs/fregs
13546 * -> partly done, the missing parts are:
13547 * - a more complete unification would involve unifying the hregs as well, so
13548 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13549 * would no longer map to the machine hregs, so the code generators would need to
13550 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13551 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13552 * fp/non-fp branches speeds it up by about 15%.
13553 * - use sext/zext opcodes instead of shifts
13555 * - get rid of TEMPLOADs if possible and use vregs instead
13556 * - clean up usage of OP_P/OP_ opcodes
13557 * - cleanup usage of DUMMY_USE
13558 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13560 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13561 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13562 * - make sure handle_stack_args () is called before the branch is emitted
13563 * - when the new IR is done, get rid of all unused stuff
13564 * - COMPARE/BEQ as separate instructions or unify them ?
13565 * - keeping them separate allows specialized compare instructions like
13566 * compare_imm, compare_membase
13567 * - most back ends unify fp compare+branch, fp compare+ceq
13568 * - integrate mono_save_args into inline_method
13569 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13570 * - handle long shift opts on 32 bit platforms somehow: they require
13571 * 3 sregs (2 for arg1 and 1 for arg2)
13572 * - make byref a 'normal' type.
13573 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13574 * variable if needed.
13575 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13576 * like inline_method.
13577 * - remove inlining restrictions
13578 * - fix LNEG and enable cfold of INEG
13579 * - generalize x86 optimizations like ldelema as a peephole optimization
13580 * - add store_mem_imm for amd64
13581 * - optimize the loading of the interruption flag in the managed->native wrappers
13582 * - avoid special handling of OP_NOP in passes
13583 * - move code inserting instructions into one function/macro.
13584 * - try a coalescing phase after liveness analysis
13585 * - add float -> vreg conversion + local optimizations on !x86
13586 * - figure out how to handle decomposed branches during optimizations, ie.
13587 * compare+branch, op_jump_table+op_br etc.
13588 * - promote RuntimeXHandles to vregs
13589 * - vtype cleanups:
13590 * - add a NEW_VARLOADA_VREG macro
13591 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13592 * accessing vtype fields.
13593 * - get rid of I8CONST on 64 bit platforms
13594 * - dealing with the increase in code size due to branches created during opcode
13596 * - use extended basic blocks
13597 * - all parts of the JIT
13598 * - handle_global_vregs () && local regalloc
13599 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13600 * - sources of increase in code size:
13603 * - isinst and castclass
13604 * - lvregs not allocated to global registers even if used multiple times
13605 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13607 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13608 * - add all micro optimizations from the old JIT
13609 * - put tree optimizations into the deadce pass
13610 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13611 * specific function.
13612 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13613 * fcompare + branchCC.
13614 * - create a helper function for allocating a stack slot, taking into account
13615 * MONO_CFG_HAS_SPILLUP.
13617 * - merge the ia64 switch changes.
13618 * - optimize mono_regstate2_alloc_int/float.
13619 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13620 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13621 * parts of the tree could be separated by other instructions, killing the tree
13622 * arguments, or stores killing loads etc. Also, should we fold loads into other
13623 * instructions if the result of the load is used multiple times ?
13624 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13625 * - LAST MERGE: 108395.
13626 * - when returning vtypes in registers, generate IR and append it to the end of the
13627 * last bb instead of doing it in the epilog.
13628 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13636 - When to decompose opcodes:
13637 - earlier: this makes some optimizations hard to implement, since the low level IR
13638 no longer contains the necessary information. But it is easier to do.
13639 - later: harder to implement, enables more optimizations.
13640 - Branches inside bblocks:
13641 - created when decomposing complex opcodes.
13642 - branches to another bblock: harmless, but not tracked by the branch
13643 optimizations, so need to branch to a label at the start of the bblock.
13644 - branches to inside the same bblock: very problematic, trips up the local
13645 reg allocator. Can be fixed by splitting the current bblock, but that is a
13646 complex operation, since some local vregs can become global vregs etc.
13647 - Local/global vregs:
13648 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13649 local register allocator.
13650 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13651 structure, created by mono_create_var (). Assigned to hregs or the stack by
13652 the global register allocator.
13653 - When to do optimizations like alu->alu_imm:
13654 - earlier -> saves work later on since the IR will be smaller/simpler
13655 - later -> can work on more instructions
13656 - Handling of valuetypes:
13657 - When a vtype is pushed on the stack, a new temporary is created, an
13658 instruction computing its address (LDADDR) is emitted and pushed on
13659 the stack. Need to optimize cases when the vtype is used immediately as in
13660 argument passing, stloc etc.
13661 - Instead of the to_end stuff in the old JIT, simply call the function handling
13662 the values on the stack before emitting the last instruction of the bb.
13665 #endif /* DISABLE_JIT */