2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/*
 * Failure-reporting helper macros.  They rely on locals and labels of the
 * enclosing function (`cfg', `method', `field', and the `inline_failure' /
 * `exception_exit' labels), so they are only usable inside the IR builder.
 */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
130 /* Determine whether 'ins' represents a load of the 'this' argument */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in this file. */
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
/* Cached trampoline/icall signatures, built in mono_create_helper_signatures (). */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
/* Initialize liveness info for variable VI.  NOTE(review): 0xffff looks like a
 * "no use seen yet" sentinel for the first-use bblock id -- confirm. */
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 *
 *   Copy the first three entries of REGS into the source registers of INS.
 */
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
/* Public wrappers over the internal vreg allocators (alloc_ireg & co.). */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
/* Allocate a floating point vreg. */
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
/* Allocate a pointer-sized vreg. */
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for STACK_TYPE. */
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the move opcode used to copy a value of that type between
 * vregs, dispatching on type->type much like type_to_eval_stack_type () below.
 */
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_replace_type (type);
275 switch (type->type) {
278 case MONO_TYPE_BOOLEAN:
290 case MONO_TYPE_FNPTR:
292 case MONO_TYPE_CLASS:
293 case MONO_TYPE_STRING:
294 case MONO_TYPE_OBJECT:
295 case MONO_TYPE_SZARRAY:
296 case MONO_TYPE_ARRAY:
300 #if SIZEOF_REGISTER == 8
309 case MONO_TYPE_VALUETYPE:
/* An enum moves like its underlying integral type. */
310 if (type->data.klass->enumtype) {
311 type = mono_class_enum_basetype (type->data.klass);
314 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
317 case MONO_TYPE_TYPEDBYREF:
319 case MONO_TYPE_GENERICINST:
/* Inflated types re-dispatch on the open container class. */
320 type = &type->data.generic_class->container_class->byval_arg;
324 g_assert (cfg->generic_sharing_context);
325 if (mini_type_var_is_vt (cfg, type))
330 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: dump basic block BB -- its in/out edges and every
 * instruction on its code list -- prefixed with MSG.
 */
336 mono_print_bb (MonoBasicBlock *bb, const char *msg)
341 printf ("\n%s %d: [IN: ", msg, bb->block_num);
342 for (i = 0; i < bb->in_count; ++i)
343 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
345 for (i = 0; i < bb->out_count; ++i)
346 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
348 for (tree = bb->code; tree; tree = tree->next)
349 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Build the cached icall signatures declared above.  Presumably called once
 * during JIT initialization, before the signatures are first used -- confirm
 * against the caller.
 */
353 mono_create_helper_signatures (void)
355 helper_sig_domain_get = mono_create_icall_signature ("ptr");
356 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
357 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
358 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
359 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
360 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
361 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
365 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
366 * foo<T> (int i) { ldarg.0; box T; }
368 #define UNVERIFIED do { \
369 if (cfg->gsharedvt) { \
370 if (cfg->verbose_level > 2) \
371 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
372 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
373 goto exception_exit; \
375 if (mini_get_debug_options ()->break_on_unverified) \
381 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Like LOAD_ERROR, but records the offending class in cfg->exception_ptr. */
383 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Look up the bblock starting at IP, lazily creating and registering it. */
385 #define GET_BBLOCK(cfg,tblock,ip) do { \
386 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
388 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
389 NEW_BBLOCK (cfg, (tblock)); \
390 (tblock)->cil_code = (ip); \
391 ADD_BBLOCK (cfg, (tblock)); \
395 #if defined(TARGET_X86) || defined(TARGET_AMD64)
396 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
397 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
398 (dest)->dreg = alloc_ireg_mp ((cfg)); \
399 (dest)->sreg1 = (sr1); \
400 (dest)->sreg2 = (sr2); \
401 (dest)->inst_imm = (imm); \
402 (dest)->backend.shift_amount = (shift); \
403 MONO_ADD_INS ((cfg)->cbb, (dest)); \
407 #if SIZEOF_REGISTER == 8
408 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
409 /* FIXME: Need to add many more cases */ \
410 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
412 int dr = alloc_preg (cfg); \
413 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
414 (ins)->sreg2 = widen->dreg; \
418 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values, emit the type-specialized binary op, push the result. */
421 #define ADD_BINOP(op) do { \
422 MONO_INST_NEW (cfg, ins, (op)); \
424 ins->sreg1 = sp [0]->dreg; \
425 ins->sreg2 = sp [1]->dreg; \
426 type_from_op (ins, sp [0], sp [1]); \
428 /* Have to insert a widening op */ \
429 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
430 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
431 MONO_ADD_INS ((cfg)->cbb, (ins)); \
432 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
435 #define ADD_UNOP(op) do { \
436 MONO_INST_NEW (cfg, ins, (op)); \
438 ins->sreg1 = sp [0]->dreg; \
439 type_from_op (ins, sp [0], NULL); \
441 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
442 MONO_ADD_INS ((cfg)->cbb, (ins)); \
443 *sp++ = mono_decompose_opcode (cfg, ins); \
446 #define ADD_BINCOND(next_block) do { \
449 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
450 cmp->sreg1 = sp [0]->dreg; \
451 cmp->sreg2 = sp [1]->dreg; \
452 type_from_op (cmp, sp [0], sp [1]); \
454 type_from_op (ins, sp [0], sp [1]); \
455 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
456 GET_BBLOCK (cfg, tblock, target); \
457 link_bblock (cfg, bblock, tblock); \
458 ins->inst_true_bb = tblock; \
459 if ((next_block)) { \
460 link_bblock (cfg, bblock, (next_block)); \
461 ins->inst_false_bb = (next_block); \
462 start_new_bblock = 1; \
464 GET_BBLOCK (cfg, tblock, ip); \
465 link_bblock (cfg, bblock, tblock); \
466 ins->inst_false_bb = tblock; \
467 start_new_bblock = 2; \
469 if (sp != stack_start) { \
470 handle_stack_args (cfg, stack_start, sp - stack_start); \
471 CHECK_UNVERIFIABLE (cfg); \
473 MONO_ADD_INS (bblock, cmp); \
474 MONO_ADD_INS (bblock, ins); \
478 * link_bblock: Links two basic blocks
480 * links two basic blocks in the control flow graph, the 'from'
481 * argument is the starting block and the 'to' argument is the block
482 * the control flow ends to after 'from'.
485 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
487 MonoBasicBlock **newa;
/* Debug tracing of the new edge; a NULL cil_code apparently denotes the
 * synthetic entry/exit blocks -- confirm. */
491 if (from->cil_code) {
493 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
495 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
498 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
500 printf ("edge from entry to exit\n");
/* Nothing to do if the edge already exists in from's successor list. */
505 for (i = 0; i < from->out_count; ++i) {
506 if (to == from->out_bb [i]) {
/* Grow the successor array by one; mempool arrays cannot be resized in place. */
512 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
513 for (i = 0; i < from->out_count; ++i) {
514 newa [i] = from->out_bb [i];
/* Mirror the edge in to's predecessor list, again deduplicating first. */
522 for (i = 0; i < to->in_count; ++i) {
523 if (from == to->in_bb [i]) {
529 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
530 for (i = 0; i < to->in_count; ++i) {
531 newa [i] = to->in_bb [i];
/* Public wrapper for link_bblock (). */
540 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 link_bblock (cfg, from, to);
546 * mono_find_block_region:
548 * We mark each basic block with a region ID. We use that to avoid BB
549 * optimizations when blocks are in different regions.
552 * A region token that encodes where this region is, and information
553 * about the clause owner for this block.
555 * The region encodes the try/catch/filter clause that owns this block
556 * as well as the type. -1 is a special value that represents a block
557 * that is in none of try/catch/filter.
560 mono_find_block_region (MonoCompile *cfg, int offset)
562 MonoMethodHeader *header = cfg->header;
563 MonoExceptionClause *clause;
/* Scan the EH clauses in declaration order; the first match returns. */
566 for (i = 0; i < header->num_clauses; ++i) {
567 clause = &header->clauses [i];
/* Filter region: OFFSET lies between the filter start and the handler start. */
568 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
569 (offset < (clause->handler_offset)))
570 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
572 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
573 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
574 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
575 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
576 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
578 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not inside the handler itself. */
581 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
582 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the EH clauses of kind TYPE whose protected range covers IP but
 * not TARGET -- i.e. the handlers that are exited by a branch from IP to
 * TARGET.  The matches are appended to a GList of MonoExceptionClause*.
 */
589 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
591 MonoMethodHeader *header = cfg->header;
592 MonoExceptionClause *clause;
596 for (i = 0; i < header->num_clauses; ++i) {
597 clause = &header->clauses [i];
598 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
599 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
600 if (clause->flags == type)
601 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the per-region variable cached in cfg->spvars (presumably the
 * stack-pointer save var, judging by the name -- confirm), creating it and
 * inserting it into the hash on first use.
 */
608 mono_create_spvar_for_region (MonoCompile *cfg, int region)
612 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
616 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
617 /* prevent it from being register allocated */
618 var->flags |= MONO_INST_VOLATILE;
620 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Return the cached exception variable for OFFSET, or NULL if none exists yet. */
624 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
626 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Like mono_find_exvar_for_offset () but creates (and caches) an
 * object-typed, non-register-allocatable local on first use.
 */
630 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
634 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
638 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
639 /* prevent it from being register allocated */
640 var->flags |= MONO_INST_VOLATILE;
642 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
656 type = mini_replace_type (type);
657 inst->klass = klass = mono_class_from_mono_type (type);
659 inst->type = STACK_MP;
664 switch (type->type) {
666 inst->type = STACK_INV;
670 case MONO_TYPE_BOOLEAN:
676 inst->type = STACK_I4;
681 case MONO_TYPE_FNPTR:
682 inst->type = STACK_PTR;
684 case MONO_TYPE_CLASS:
685 case MONO_TYPE_STRING:
686 case MONO_TYPE_OBJECT:
687 case MONO_TYPE_SZARRAY:
688 case MONO_TYPE_ARRAY:
689 inst->type = STACK_OBJ;
693 inst->type = STACK_I8;
697 inst->type = STACK_R8;
699 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying integral type. */
700 if (type->data.klass->enumtype) {
701 type = mono_class_enum_basetype (type->data.klass);
705 inst->type = STACK_VTYPE;
708 case MONO_TYPE_TYPEDBYREF:
709 inst->klass = mono_defaults.typed_reference_class;
710 inst->type = STACK_VTYPE;
712 case MONO_TYPE_GENERICINST:
713 type = &type->data.generic_class->container_class->byval_arg;
717 g_assert (cfg->generic_sharing_context);
/* Under gsharedvt, open types are treated as value types, otherwise as refs. */
718 if (mini_is_gsharedvt_type (cfg, type)) {
719 g_assert (cfg->gsharedvt);
720 inst->type = STACK_VTYPE;
722 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type (used via neg_table). */
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Nonzero entries mark comparable stack-type pairs; values > 1 appear to
 * flag special-cased (questionably verifiable) combinations -- confirm. */
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
/* 64-bit/pointer-sized comparisons use the long form, floats the fp form. */
875 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
/* Compare-with-immediate: src2 is an immediate, so the table is indexed by
 * src1's type twice on purpose (not a typo). */
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to results of at most 32 bits push an I4. */
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_ICONV_TO_U;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floats, so R8 is invalid IL here. */
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
/* Memory loads: the result stack type follows the load width. */
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* NOTE(review): managed-pointer results get object_class as klass --
 * presumably a conservative default, confirm. */
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Check that the argument instructions in ARGS are compatible with SIG
 * (byref-ness, reference kinds, floating point); appears to report a
 * boolean result -- the return statements are not visible here, confirm.
 */
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *
 *   Lazily create the local caching the runtime generic context (rgctx);
 * only valid when compiling with generic sharing (see the assert).
 */
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1169 return cfg->rgctx_var;
/* Map an eval-stack type back to the MonoType used to declare a temp for it. */
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: classify a MonoType as an eval-stack type constant. */
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 t = mono_type_get_underlying_type (t);
1196 case MONO_TYPE_BOOLEAN:
1199 case MONO_TYPE_CHAR:
1206 case MONO_TYPE_FNPTR:
1208 case MONO_TYPE_CLASS:
1209 case MONO_TYPE_STRING:
1210 case MONO_TYPE_OBJECT:
1211 case MONO_TYPE_SZARRAY:
1212 case MONO_TYPE_ARRAY:
1220 case MONO_TYPE_VALUETYPE:
1221 case MONO_TYPE_TYPEDBYREF:
1223 case MONO_TYPE_GENERICINST:
1224 if (mono_type_generic_inst_is_valuetype (t))
1230 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element class accessed by the given ldelem/stelem CIL opcode.
 */
1237 array_access_to_klass (int opcode)
1241 return mono_defaults.byte_class;
1243 return mono_defaults.uint16_class;
1246 return mono_defaults.int_class;
1249 return mono_defaults.sbyte_class;
1252 return mono_defaults.int16_class;
1255 return mono_defaults.int32_class;
1257 return mono_defaults.uint32_class;
1260 return mono_defaults.int64_class;
1263 return mono_defaults.single_class;
1266 return mono_defaults.double_class;
1267 case CEE_LDELEM_REF:
1268 case CEE_STELEM_REF:
1269 return mono_defaults.object_class;
1271 g_assert_not_reached ();
1277 * We try to share variables when possible
1280 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1285 /* inlining can result in deeper stacks */
1286 if (slot >= cfg->header->max_stack)
1287 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* One cache entry per (stack slot, stack type) pair in cfg->intvars. */
1289 pos = ins->type - 1 + slot * STACK_MAX;
1291 switch (ins->type) {
1298 if ((vnum = cfg->intvars [pos]))
1299 return cfg->varinfo [vnum];
1300 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1301 cfg->intvars [pos] = res->inst_c0;
1304 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   Record the (image, token) pair for KEY in cfg->token_info_hash so that
 * AOT compilation can later resolve KEY from metadata alone.
 */
1310 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1313 * Don't use this if a generic_context is set, since that means AOT can't
1314 * look up the method using just the image+token.
1315 * table == 0 means this is a reference made from a wrapper.
1317 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1318 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1319 jump_info_token->image = image;
1320 jump_info_token->token = token;
1321 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1326 * This function is called to handle items that are left on the evaluation stack
1327 * at basic block boundaries. What happens is that we save the values to local variables
1328 * and we reload them later when first entering the target basic block (with the
1329 * handle_loaded_temps () function).
1330 * A single joint point will use the same variables (stored in the array bb->out_stack or
1331 * bb->in_stack, if the basic block is before or after the joint point).
1333 * This function needs to be called _before_ emitting the last instruction of
1334 * the bb (i.e. before emitting a branch).
1335 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 * Spill the COUNT values in SP (the evaluation stack) into shared local
 * variables at a basic-block boundary; see the comment block above for the
 * full protocol.  Sets cfg->unverifiable when the stack depths of a join
 * point disagree.
 */
1338 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1341 MonoBasicBlock *bb = cfg->cbb;
1342 MonoBasicBlock *outb;
1343 MonoInst *inst, **locals;
1348 if (cfg->verbose_level > 3)
1349 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which variables carry the stack. */
1350 if (!bb->out_scount) {
1351 bb->out_scount = count;
1352 //printf ("bblock %d has out:", bb->block_num);
/* Prefer adopting an in_stack a successor already has, so both sides agree. */
1354 for (i = 0; i < bb->out_count; ++i) {
1355 outb = bb->out_bb [i];
1356 /* exception handlers are linked, but they should not be considered for stack args */
1357 if (outb->flags & BB_EXCEPTION_HANDLER)
1359 //printf (" %d", outb->block_num);
1360 if (outb->in_stack) {
1362 bb->out_stack = outb->in_stack;
/* No successor had one yet: allocate fresh slots. */
1368 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1369 for (i = 0; i < count; ++i) {
1371 * try to reuse temps already allocated for this purpose, if they occupy the same
1372 * stack slot and if they are of the same type.
1373 * This won't cause conflicts since if 'local' is used to
1374 * store one of the values in the in_stack of a bblock, then
1375 * the same variable will be used for the same outgoing stack
1377 * This doesn't work when inlining methods, since the bblocks
1378 * in the inlined methods do not inherit their in_stack from
1379 * the bblock they are inlined to. See bug #58863 for an
1382 if (cfg->inlined_method)
1383 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1385 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to every successor that has no in_stack yet. */
1390 for (i = 0; i < bb->out_count; ++i) {
1391 outb = bb->out_bb [i];
1392 /* exception handlers are linked, but they should not be considered for stack args */
1393 if (outb->flags & BB_EXCEPTION_HANDLER)
1395 if (outb->in_scount) {
/* Join point with mismatched stack depth: IL is unverifiable. */
1396 if (outb->in_scount != bb->out_scount) {
1397 cfg->unverifiable = TRUE;
1400 continue; /* check they are the same locals */
1402 outb->in_scount = count;
1403 outb->in_stack = bb->out_stack;
1406 locals = bb->out_stack;
/* Emit the actual spill stores and make SP refer to the spilled locals. */
1408 for (i = 0; i < count; ++i) {
1409 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1410 inst->cil_code = sp [i]->cil_code;
1411 sp [i] = locals [i];
1412 if (cfg->verbose_level > 3)
1413 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1417 * It is possible that the out bblocks already have in_stack assigned, and
1418 * the in_stacks differ. In this case, we will store to all the different
1425 /* Find a bblock which has a different in_stack */
1427 while (bindex < bb->out_count) {
1428 outb = bb->out_bb [bindex];
1429 /* exception handlers are linked, but they should not be considered for stack args */
1430 if (outb->flags & BB_EXCEPTION_HANDLER) {
1434 if (outb->in_stack != locals) {
/* Duplicate the stores into this successor's distinct variable set too. */
1435 for (i = 0; i < count; ++i) {
1436 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1437 inst->cil_code = sp [i]->cil_code;
1438 sp [i] = locals [i];
1439 if (cfg->verbose_level > 3)
1440 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1442 locals = outb->in_stack;
1451 /* Emit code which loads interface_offsets [klass->interface_id]
1452 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Emit IR that loads into INTF_REG the interface-offsets entry for KLASS,
 * given a vtable pointer in VTABLE_REG.  The entry lives at a negative
 * offset from the vtable (the offsets array precedes the vtable in memory).
 */
1455 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1457 if (cfg->compile_aot) {
1458 int ioffset_reg = alloc_preg (cfg);
1459 int iid_reg = alloc_preg (cfg);
/* AOT: the iid is not known yet; MONO_PATCH_INFO_ADJUSTED_IID presumably
 * resolves to the already-scaled negative offset, so a plain add suffices —
 * NOTE(review): confirm against the AOT patch-info handling. */
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1462 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: iid is a compile-time constant, fold the negative offset directly. */
1466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that sets INTF_BIT_REG to a nonzero value iff the interface bitmap
 * found at BASE_REG + OFFSET has the bit for KLASS's interface id set.
 * Three strategies: compressed bitmap (icall), AOT (iid known at runtime
 * only, compute byte index/bit mask in IR), and JIT (iid constant-folded).
 */
1471 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1473 int ibitmap_reg = alloc_preg (cfg);
1474 #ifdef COMPRESSED_INTERFACE_BITMAP
1476 MonoInst *res, *ins;
1477 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1478 MONO_ADD_INS (cfg->cbb, ins);
1480 if (cfg->compile_aot)
1481 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1483 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* Compressed bitmaps cannot be indexed inline; defer to a runtime icall. */
1484 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1487 int ibitmap_byte_reg = alloc_preg (cfg);
1489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1491 if (cfg->compile_aot) {
1492 int iid_reg = alloc_preg (cfg);
1493 int shifted_iid_reg = alloc_preg (cfg);
1494 int ibitmap_byte_address_reg = alloc_preg (cfg);
1495 int masked_iid_reg = alloc_preg (cfg);
1496 int iid_one_bit_reg = alloc_preg (cfg);
1497 int iid_bit_reg = alloc_preg (cfg);
/* AOT: byte index = iid >> 3, bit mask = 1 << (iid & 7), all computed in IR. */
1498 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1503 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is known, so byte offset and mask are immediates. */
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1514 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1515 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap lives at MonoClass::interface_bitmap. */
1518 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1520 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1524 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1525 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap lives at MonoVTable::interface_bitmap. */
1528 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1530 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1534 * Emit code which checks whether the interface id of @klass is smaller
1535 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 * Emit IR comparing MAX_IID_REG against KLASS's interface id.  On an
 * out-of-range id, either branch to FALSE_TARGET or (when no target —
 * selection elided in this extract) raise InvalidCastException.
 */
1538 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1539 MonoBasicBlock *false_target)
1541 if (cfg->compile_aot) {
/* AOT: the iid is only known at load time, materialize it via a patch. */
1542 int iid_reg = alloc_preg (cfg);
1543 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1544 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1551 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1554 /* Same as above, but obtains max_iid from a vtable */
1556 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1557 MonoBasicBlock *false_target)
1559 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1561 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1562 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1565 /* Same as above, but obtains max_iid from a klass */
1567 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1568 MonoBasicBlock *false_target)
1570 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1573 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subtype test for non-interface KLASS against the
 * MonoClass in KLASS_REG, using the supertypes table: check idepth when the
 * default supertable may be too small, load supertypes[idepth-1] and compare
 * it with KLASS (via KLASS_INS's register, an AOT class const, or an
 * immediate).  Branches to TRUE_TARGET on match, FALSE_TARGET on depth miss.
 */
1577 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1579 int idepth_reg = alloc_preg (cfg);
1580 int stypes_reg = alloc_preg (cfg);
1581 int stype = alloc_preg (cfg);
/* Ensure klass->supertypes/idepth are initialized before we read them. */
1583 mono_class_setup_supertypes (klass);
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* Deep hierarchy: the candidate's supertable may be shorter than idepth. */
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1593 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1594 } else if (cfg->compile_aot) {
1595 int const_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper without an explicit class-instruction operand. */
1605 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1607 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface type check against the vtable in VTABLE_REG: range-check
 * the iid, test the interface bitmap bit, then branch to TRUE_TARGET on a
 * hit or (when no target — selection elided here) raise InvalidCastException.
 */
1611 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 int intf_reg = alloc_preg (cfg);
1615 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1616 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1621 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1625 * Variant of the above that takes a register to the class, not the vtable.
1628 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1630 int intf_bit_reg = alloc_preg (cfg);
1632 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1633 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
/* Nonzero bitmap bit == implements the interface. */
1634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1638 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class equality check of KLASS_REG against KLASS, raising
 * InvalidCastException on mismatch.  The comparand comes from KLASS_INST's
 * register when given (guard elided in this extract), an AOT class const,
 * or an immediate.
 */
1642 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1646 } else if (cfg->compile_aot) {
1647 int const_reg = alloc_preg (cfg);
1648 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1653 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with no class-instruction operand. */
1657 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1659 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (e.g. OP_PBEQ / OP_PBNE_UN) instead of raising an exception.
 */
1663 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1665 if (cfg->compile_aot) {
1666 int const_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1668 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1676 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in KLASS_REG against KLASS, raising
 * InvalidCastException on failure.  Arrays compare rank and then recurse on
 * the element (cast_class), with special-casing for enums; non-arrays walk
 * the supertypes table (mirroring mini_emit_isninst_cast_inst, but throwing
 * instead of branching on failure).
 */
1679 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1682 int rank_reg = alloc_preg (cfg);
1683 int eclass_reg = alloc_preg (cfg);
1685 g_assert (!klass_inst);
/* Array path: the rank must match exactly. */
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1688 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1689 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class checks; the enum cases handle Enum/underlying-type aliasing. */
1691 if (klass->cast_class == mono_defaults.object_class) {
1692 int parent_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1694 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1695 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1696 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1697 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1698 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1699 } else if (klass->cast_class == mono_defaults.enum_class) {
1700 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1701 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1702 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1704 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1705 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY target: the object must be a vector (no bounds array). */
1708 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1709 /* Check that the object is a vector too */
1710 int bounds_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1713 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table walk, throwing on any mismatch. */
1716 int idepth_reg = alloc_preg (cfg);
1717 int stypes_reg = alloc_preg (cfg);
1718 int stype = alloc_preg (cfg);
1720 mono_class_setup_supertypes (klass);
1722 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1725 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1727 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1729 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no class-instruction operand. */
1734 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1736 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR that fills SIZE bytes at DESTREG+OFFSET with VAL (only
 * VAL == 0 is supported, per the assert).  Small aligned sizes become single
 * store-immediates; larger sizes loop over register-width stores, stepping
 * down through 4/2/1-byte tails.
 */
1740 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1744 g_assert (val == 0);
/* Fast path: one store covers the whole region when size <= align <= 4 (or 8). */
1749 if ((size <= 4) && (size <= align)) {
1752 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1755 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1760 #if SIZEOF_REGISTER == 8
1762 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register and emit a run of stores. */
1768 val_reg = alloc_preg (cfg);
1770 if (SIZEOF_REGISTER == 8)
1771 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1773 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned case: byte-at-a-time. */
1776 /* This could be optimized further if necessary */
1778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1785 #if !NO_UNALIGNED_ACCESS
1786 if (SIZEOF_REGISTER == 8) {
/* 64-bit: align to 8 with a 4-byte store, then 8-byte stores. */
1788 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET.
 * Fully unrolled (no loop), hence the size assert; copies in register-width
 * chunks where alignment and the target permit, then 4/2/1-byte tails.
 */
1818 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1825 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1826 g_assert (size < 10000);
/* Unaligned case: byte-at-a-time. */
1829 /* This could be optimized further if necessary */
1831 cur_reg = alloc_preg (cfg);
1832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1840 #if !NO_UNALIGNED_ACCESS
1841 if (SIZEOF_REGISTER == 8) {
/* 64-bit: copy in 8-byte chunks while possible. */
1843 cur_reg = alloc_preg (cfg);
1844 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1845 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tails: 4-, 2-, then 1-byte copies. */
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1870 cur_reg = alloc_preg (cfg);
1871 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR storing the value in SREG1 into the TLS slot TLS_KEY.  Under AOT
 * the slot's offset is materialized via a patchable constant (OP_TLS_SET_REG);
 * under JIT the offset is resolved now and baked into OP_TLS_SET.
 */
1880 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1884 if (cfg->compile_aot) {
1885 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1886 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1888 ins->sreg2 = c->dreg;
1889 MONO_ADD_INS (cfg->cbb, ins);
1891 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1893 ins->inst_offset = mini_get_tls_offset (tls_key);
1894 MONO_ADD_INS (cfg->cbb, ins);
1901 * Emit IR to push the current LMF onto the LMF stack.
1904 emit_push_lmf (MonoCompile *cfg)
1907 * Emit IR to push the LMF:
1908 * lmf_addr = <lmf_addr from tls>
1909 * lmf->lmf_addr = lmf_addr
1910 * lmf->prev_lmf = *lmf_addr
1913 int lmf_reg, prev_lmf_reg;
1914 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself lives in TLS, so pushing is two operations. */
1919 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1920 /* Load current lmf */
1921 lmf_ins = mono_get_lmf_intrinsic (cfg);
1923 MONO_ADD_INS (cfg->cbb, lmf_ins);
1924 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1925 lmf_reg = ins->dreg;
1926 /* Save previous_lmf */
1927 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Make our stack LMF the current one. */
1929 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1932 * Store lmf_addr in a variable, so it can be allocated to a global register.
1934 if (!cfg->lmf_addr_var)
1935 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr: via jit_tls intrinsic, lmf_addr intrinsic, or icall,
 * depending on target support (selection guards elided in this extract). */
1938 ins = mono_get_jit_tls_intrinsic (cfg);
1940 int jit_tls_dreg = ins->dreg;
1942 MONO_ADD_INS (cfg->cbb, ins);
1943 lmf_reg = alloc_preg (cfg);
1944 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
1946 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1949 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1951 MONO_ADD_INS (cfg->cbb, lmf_ins);
1953 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1955 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1957 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1958 lmf_reg = ins->dreg;
1960 prev_lmf_reg = alloc_preg (cfg);
1961 /* Save previous_lmf */
1962 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1963 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Link our LMF at the head of the list: *lmf_addr = lmf. */
1965 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1972 * Emit IR to pop the current LMF from the LMF stack.
1975 emit_pop_lmf (MonoCompile *cfg)
1977 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1983 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1984 lmf_reg = ins->dreg;
/* Fast path mirroring emit_push_lmf: restore previous_lmf straight into TLS. */
1986 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1987 /* Load previous_lmf */
1988 prev_lmf_reg = alloc_preg (cfg);
1989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1991 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
1994 * Emit IR to pop the LMF:
1995 * *(lmf->lmf_addr) = lmf->prev_lmf
1997 /* This could be called before emit_push_lmf () */
1998 if (!cfg->lmf_addr_var)
1999 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2000 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2002 prev_lmf_reg = alloc_preg (cfg);
2003 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
2004 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 * Map a method's return TYPE to the call opcode family (plain/long/float/
 * vtype/void), then pick the _REG / _MEMBASE / direct variant from CALLI
 * and VIRT.  Enums and generic insts are unwrapped and re-dispatched.
 */
2009 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized (guard elided in this extract). */
2012 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2015 type = mini_get_basic_type_from_generic (gsctx, type);
2016 type = mini_replace_type (type);
2017 switch (type->type) {
2018 case MONO_TYPE_VOID:
2019 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2022 case MONO_TYPE_BOOLEAN:
2025 case MONO_TYPE_CHAR:
2028 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2032 case MONO_TYPE_FNPTR:
2033 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2034 case MONO_TYPE_CLASS:
2035 case MONO_TYPE_STRING:
2036 case MONO_TYPE_OBJECT:
2037 case MONO_TYPE_SZARRAY:
2038 case MONO_TYPE_ARRAY:
2039 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2042 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2045 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2046 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their underlying primitive type. */
2047 if (type->data.klass->enumtype) {
2048 type = mono_class_enum_basetype (type->data.klass);
2051 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2052 case MONO_TYPE_TYPEDBYREF:
2053 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2054 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2055 type = &type->data.generic_class->container_class->byval_arg;
2058 case MONO_TYPE_MVAR:
/* gsharedvt: type variables are treated as vtypes. */
2060 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2062 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2068 * target_type_is_incompatible:
2069 * @cfg: MonoCompile context
2071 * Check that the item @arg on the evaluation stack can be stored
2072 * in the target type (can be a local, or field, etc).
2073 * The cfg arg can be used to check if we need verification or just
2076 * Returns: non-0 value if arg can't be stored on a target.
2079 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2081 MonoType *simple_type;
2084 target = mini_replace_type (target);
2085 if (target->byref) {
2086 /* FIXME: check that the pointed to types match */
2087 if (arg->type == STACK_MP)
2088 return arg->klass != mono_class_from_mono_type (target);
2089 if (arg->type == STACK_PTR)
2094 simple_type = mono_type_get_underlying_type (target);
2095 switch (simple_type->type) {
2096 case MONO_TYPE_VOID:
2100 case MONO_TYPE_BOOLEAN:
2103 case MONO_TYPE_CHAR:
2106 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2110 /* STACK_MP is needed when setting pinned locals */
2111 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2116 case MONO_TYPE_FNPTR:
2118 * Some opcodes like ldloca return 'transient pointers' which can be stored
2119 * in native int. (#688008).
2121 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2124 case MONO_TYPE_CLASS:
2125 case MONO_TYPE_STRING:
2126 case MONO_TYPE_OBJECT:
2127 case MONO_TYPE_SZARRAY:
2128 case MONO_TYPE_ARRAY:
2129 if (arg->type != STACK_OBJ)
2131 /* FIXME: check type compatibility */
2135 if (arg->type != STACK_I8)
2140 if (arg->type != STACK_R8)
2143 case MONO_TYPE_VALUETYPE:
/* Vtypes must match by exact class, not just stack kind. */
2144 if (arg->type != STACK_VTYPE)
2146 klass = mono_class_from_mono_type (simple_type);
2147 if (klass != arg->klass)
2150 case MONO_TYPE_TYPEDBYREF:
2151 if (arg->type != STACK_VTYPE)
2153 klass = mono_class_from_mono_type (simple_type);
2154 if (klass != arg->klass)
2157 case MONO_TYPE_GENERICINST:
2158 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2159 if (arg->type != STACK_VTYPE)
2161 klass = mono_class_from_mono_type (simple_type);
2162 if (klass != arg->klass)
2166 if (arg->type != STACK_OBJ)
2168 /* FIXME: check type compatibility */
2172 case MONO_TYPE_MVAR:
/* Open type variables are only legal under generic sharing. */
2173 g_assert (cfg->generic_sharing_context);
2174 if (mini_type_var_is_vt (cfg, simple_type)) {
2175 if (arg->type != STACK_VTYPE)
2178 if (arg->type != STACK_OBJ)
2183 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2189 * Prepare arguments for passing to a function call.
2190 * Return a non-zero value if the arguments can't be passed to the given
2192 * The type checks are not yet complete and some conversions may need
2193 * casts on 32 or 64 bit architectures.
2195 * FIXME: implement this using target_type_is_incompatible ()
2198 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2200 MonoType *simple_type;
/* For instance calls, args[0] is 'this' and must be a reference/pointer. */
2204 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2208 for (i = 0; i < sig->param_count; ++i) {
2209 if (sig->params [i]->byref) {
2210 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2214 simple_type = sig->params [i];
2215 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2217 switch (simple_type->type) {
2218 case MONO_TYPE_VOID:
2223 case MONO_TYPE_BOOLEAN:
2226 case MONO_TYPE_CHAR:
2229 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2235 case MONO_TYPE_FNPTR:
2236 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2239 case MONO_TYPE_CLASS:
2240 case MONO_TYPE_STRING:
2241 case MONO_TYPE_OBJECT:
2242 case MONO_TYPE_SZARRAY:
2243 case MONO_TYPE_ARRAY:
2244 if (args [i]->type != STACK_OBJ)
2249 if (args [i]->type != STACK_I8)
2254 if (args [i]->type != STACK_R8)
2257 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their underlying primitive and are re-checked. */
2258 if (simple_type->data.klass->enumtype) {
2259 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2262 if (args [i]->type != STACK_VTYPE)
2265 case MONO_TYPE_TYPEDBYREF:
2266 if (args [i]->type != STACK_VTYPE)
2269 case MONO_TYPE_GENERICINST:
2270 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2273 case MONO_TYPE_MVAR:
/* gsharedvt type variables are passed as vtypes. */
2275 if (args [i]->type != STACK_VTYPE)
2279 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * Aborts on any opcode outside the known call families.
 */
2287 callvirt_to_call (int opcode)
2290 case OP_CALL_MEMBASE:
2292 case OP_VOIDCALL_MEMBASE:
2294 case OP_FCALL_MEMBASE:
2296 case OP_VCALL_MEMBASE:
2298 case OP_LCALL_MEMBASE:
2301 g_assert_not_reached ();
2307 #ifdef MONO_ARCH_HAVE_IMT
2308 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Arrange for the IMT argument (the interface METHOD, or a precomputed
 * IMT_ARG) to reach CALL: LLVM records it in imt_arg_reg, otherwise it is
 * pinned to MONO_ARCH_IMT_REG (or handed to the arch backend when no IMT
 * register exists).
 */
2310 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2314 if (COMPILE_LLVM (cfg)) {
2315 method_reg = alloc_preg (cfg);
/* Use IMT_ARG when provided; otherwise materialize METHOD as a constant. */
2318 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2319 } else if (cfg->compile_aot) {
2320 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2323 MONO_INST_NEW (cfg, ins, OP_PCONST);
2324 ins->inst_p0 = method;
2325 ins->dreg = method_reg;
2326 MONO_ADD_INS (cfg->cbb, ins);
2330 call->imt_arg_reg = method_reg;
2332 #ifdef MONO_ARCH_IMT_REG
2333 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2335 /* Need this to keep the IMT arg alive */
2336 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same constant selection, then bind to the IMT register. */
2341 #ifdef MONO_ARCH_IMT_REG
2342 method_reg = alloc_preg (cfg);
2345 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2346 } else if (cfg->compile_aot) {
2347 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2350 MONO_INST_NEW (cfg, ins, OP_PCONST);
2351 ins->inst_p0 = method;
2352 ins->dreg = method_reg;
2353 MONO_ADD_INS (cfg->cbb, ins);
2356 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2358 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2363 static MonoJumpInfo *
/* Allocate a MonoJumpInfo from MP and fill in its ip/type/target fields. */
2364 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2366 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2370 ji->data.target = target;
/* Return KLASS's generic-context usage, or 0 when not compiling shared code. */
2376 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2378 if (cfg->generic_sharing_context)
2379 return mono_class_check_context_used (klass);
/* Return METHOD's generic-context usage, or 0 when not compiling shared code. */
2385 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2387 if (cfg->generic_sharing_context)
2388 return mono_method_check_context_used (method);
2394 * check_method_sharing:
2396 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2399 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2401 gboolean pass_vtable = FALSE;
2402 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes may need the vtable. */
2404 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2405 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2406 gboolean sharable = FALSE;
2408 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2411 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2412 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2413 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2415 sharable = sharing_enabled && context_sharable;
2419 * Pass vtable iff target method might
2420 * be shared, which means that sharing
2421 * is enabled for its class and its
2422 * context is sharable (and it's not a
2425 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst set) need an mrgctx instead of the vtable. */
2429 if (mini_method_get_context (cmethod) &&
2430 mini_method_get_context (cmethod)->method_inst) {
2431 g_assert (!pass_vtable);
2433 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2436 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2437 MonoGenericContext *context = mini_method_get_context (cmethod);
2438 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2440 if (sharing_enabled && context_sharable)
2442 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2447 if (out_pass_vtable)
2448 *out_pass_vtable = pass_vtable;
2449 if (out_pass_mrgctx)
2450 *out_pass_mrgctx = pass_mrgctx;
2453 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for SIG/ARGS: pick the call opcode (tail calls use
 * OP_TAILCALL), arrange value-type return handling (vret_var or an
 * OP_OUTARG_VTRETADDR temp), convert r4 args under soft-float, and hand the
 * call to the arch/LLVM backend.  Returns the (not yet emitted) call inst.
 */
2454 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2455 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2459 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2464 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2466 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2469 call->signature = sig;
2470 call->rgctx_reg = rgctx;
2471 sig_ret = mini_replace_type (sig->ret);
2473 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Vtype return, first form: reuse the caller's vret_addr. */
2476 if (mini_type_is_vtype (cfg, sig_ret)) {
2477 call->vret_var = cfg->vret_addr;
2478 //g_assert_not_reached ();
/* Vtype return, second form: allocate a temp and reference it lazily. */
2480 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2481 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2484 temp->backend.is_pinvoke = sig->pinvoke;
2487 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2488 * address of return value to increase optimization opportunities.
2489 * Before vtype decomposition, the dreg of the call ins itself represents the
2490 * fact the call modifies the return value. After decomposition, the call will
2491 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2492 * will be transformed into an LDADDR.
2494 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2495 loada->dreg = alloc_preg (cfg);
2496 loada->inst_p0 = temp;
2497 /* We reference the call too since call->dreg could change during optimization */
2498 loada->inst_p1 = call;
2499 MONO_ADD_INS (cfg->cbb, loada);
2501 call->inst.dreg = temp->dreg;
2503 call->vret_var = loada;
2504 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2505 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2507 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2508 if (COMPILE_SOFT_FLOAT (cfg)) {
2510 * If the call has a float argument, we would need to do an r8->r4 conversion using
2511 * an icall, but that cannot be done during the call sequence since it would clobber
2512 * the call registers + the stack. So we do it before emitting the call.
2514 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2516 MonoInst *in = call->args [i];
2518 if (i >= sig->hasthis)
2519 t = sig->params [i - sig->hasthis];
2521 t = &mono_defaults.int_class->byval_arg;
2522 t = mono_type_get_underlying_type (t);
2524 if (!t->byref && t->type == MONO_TYPE_R4) {
2525 MonoInst *iargs [1];
2529 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2531 /* The result will be in an int vreg */
2532 call->args [i] = conv;
2538 call->need_unbox_trampoline = unbox_trampoline;
2541 if (COMPILE_LLVM (cfg))
2542 mono_llvm_emit_call (cfg, call);
2544 mono_arch_emit_call (cfg, call);
2546 mono_arch_emit_call (cfg, call);
/* Track outgoing-arg stack needs and note that this method makes calls. */
2549 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2550 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 * Attach the runtime-generic-context argument (already moved into RGCTX_REG)
 * to CALL. On architectures with a dedicated rgctx register the value is
 * pinned to MONO_ARCH_RGCTX_REG as a call out-arg.
 */
2556 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2558 #ifdef MONO_ARCH_RGCTX_REG
2559 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2560 cfg->uses_rgctx_reg = TRUE;
2561 call->rgctx_reg = TRUE;
2563 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through ADDR with signature SIG. Optionally attaches
 * an IMT argument and an rgctx argument. For pinvoke wrappers (when
 * check_pinvoke_callconv is set) it also saves the stack pointer before the
 * call and verifies it afterwards, throwing ExecutionEngineException on a
 * calling-convention mismatch.
 */
2570 inline static MonoInst*
2571 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2576 gboolean check_sp = FALSE;
2578 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2579 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2581 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh preg so it survives until set_rgctx_arg. */
2586 rgctx_reg = mono_alloc_preg (cfg);
2587 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2591 if (!cfg->stack_inbalance_var)
2592 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Record the SP before the call for the post-call comparison below. */
2594 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2595 ins->dreg = cfg->stack_inbalance_var->dreg;
2596 MONO_ADD_INS (cfg->cbb, ins);
2599 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2601 call->inst.sreg1 = addr->dreg;
2604 emit_imt_argument (cfg, call, NULL, imt_arg);
2606 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2611 sp_reg = mono_alloc_preg (cfg);
2613 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2615 MONO_ADD_INS (cfg->cbb, ins);
2617 /* Restore the stack so we don't crash when throwing the exception */
2618 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2619 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2620 MONO_ADD_INS (cfg->cbb, ins);
2622 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2623 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2627 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2629 return (MonoInst*)call;
2633 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2636 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2638 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 * Emit IR for a (possibly virtual/tail) call to METHOD with SIG/ARGS.
 * THIS is the receiver instruction (NULL for static calls), IMT_ARG and
 * RGCTX_ARG attach IMT/rgctx arguments. Handles remoting proxies, string
 * ctors, delegate Invoke fast-paths, devirtualization of sealed/final
 * methods and vtable/IMT slot dispatch for truly virtual calls.
 * Returns the call instruction cast to MonoInst*.
 */
2641 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2642 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2644 #ifndef DISABLE_REMOTING
2645 gboolean might_be_remote = FALSE;
2647 gboolean virtual = this != NULL;
2648 gboolean enable_for_aot = TRUE;
2652 gboolean need_unbox_trampoline;
2655 sig = mono_method_signature (method);
/* Save the rgctx value into its own preg; attached after the call is built. */
2658 rgctx_reg = mono_alloc_preg (cfg);
2659 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2662 if (method->string_ctor) {
2663 /* Create the real signature */
2664 /* FIXME: Cache these */
2665 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2666 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2671 context_used = mini_method_check_context_used (cfg, method);
2673 #ifndef DISABLE_REMOTING
/* A call on a MarshalByRef (or object) receiver may go through a transparent proxy. */
2674 might_be_remote = this && sig->hasthis &&
2675 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2676 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2678 if (might_be_remote && context_used) {
2681 g_assert (cfg->generic_sharing_context);
/* Under generic sharing, fetch the remoting-check invoke address from the rgctx. */
2683 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2685 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2689 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2691 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2693 #ifndef DISABLE_REMOTING
2694 if (might_be_remote)
2695 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2698 call->method = method;
2699 call->inst.flags |= MONO_INST_HAS_METHOD;
2700 call->inst.inst_left = this;
2701 call->tail_call = tail;
2704 int vtable_reg, slot_reg, this_reg;
2707 this_reg = this->dreg;
/* Fast path: delegate Invoke goes through delegate->invoke_impl directly. */
2709 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2710 MonoInst *dummy_use;
2712 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2714 /* Make a call to delegate->invoke_impl */
2715 call->inst.inst_basereg = this_reg;
2716 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2717 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2719 /* We must emit a dummy use here because the delegate trampoline will
2720 replace the 'this' argument with the delegate target making this activation
2721 no longer a root for the delegate.
2722 This is an issue for delegates that target collectible code such as dynamic
2723 methods of GC'able assemblies.
2725 For a test case look into #667921.
2727 FIXME: a dummy use is not the best way to do it as the local register allocator
2728 will put it on a caller save register and spil it around the call.
2729 Ideally, we would either put it on a callee save register or only do the store part.
2731 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2733 return (MonoInst*)call;
/* Devirtualize: non-virtual method (or final, non-remoting-wrapper) on a non-proxy class. */
2736 if ((!cfg->compile_aot || enable_for_aot) &&
2737 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2738 (MONO_METHOD_IS_FINAL (method) &&
2739 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2740 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2742 * the method is not virtual, we just need to ensure this is not null
2743 * and then we can call the method directly.
2745 #ifndef DISABLE_REMOTING
2746 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2748 * The check above ensures method is not gshared, this is needed since
2749 * gshared methods can't have wrappers.
2751 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2755 if (!method->string_ctor)
2756 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2758 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2759 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2761 * the method is virtual, but we can statically dispatch since either
2762 * it's class or the method itself are sealed.
2763 * But first we need to ensure it's not a null reference.
2765 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2767 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable, then the slot (IMT for interfaces). */
2769 vtable_reg = alloc_preg (cfg);
2770 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2771 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2773 #ifdef MONO_ARCH_HAVE_IMT
2775 guint32 imt_slot = mono_method_get_imt_slot (method);
2776 emit_imt_argument (cfg, call, call->method, imt_arg);
2777 slot_reg = vtable_reg;
/* IMT table sits at negative offsets before the vtable pointer. */
2778 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2781 if (slot_reg == -1) {
2782 slot_reg = alloc_preg (cfg);
2783 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2784 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2787 slot_reg = vtable_reg;
2788 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2789 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2790 #ifdef MONO_ARCH_HAVE_IMT
2792 g_assert (mono_method_signature (method)->generic_param_count);
2793 emit_imt_argument (cfg, call, call->method, imt_arg);
2798 call->inst.sreg1 = slot_reg;
2799 call->inst.inst_offset = offset;
2800 call->virtual = TRUE;
2804 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2807 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2809 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 * Convenience wrapper: non-tail call to METHOD with its own signature and
 * no IMT/rgctx arguments.
 */
2813 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2815 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function FUNC with signature SIG.
 * All the boolean call options (calli/virtual/tail/rgctx/unbox) are off.
 */
2819 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2826 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2829 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2831 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the JIT icall registered for FUNC, going through the
 * icall's wrapper so the usual managed-to-native transitions happen.
 */
2835 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2837 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2841 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2845 * mono_emit_abs_call:
2847 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2849 inline static MonoInst*
2850 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2851 MonoMethodSignature *sig, MonoInst **args)
2853 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2857 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The jump-info is registered in cfg->abs_patches so the backend can resolve it later. */
2860 if (cfg->abs_patches == NULL)
2861 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2862 g_hash_table_insert (cfg->abs_patches, ji, ji);
2863 ins = mono_emit_native_call (cfg, ji, sig, args);
2864 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 * For pinvoke (or LLVM) calls returning small integers, emit an explicit
 * widening conversion of the call result, since native code may leave the
 * upper bits of the return register uninitialized.
 */
2869 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2871 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2872 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2876 * Native code might return non register sized integers
2877 * without initializing the upper bits.
/* Map each sub-word load kind to the matching sign/zero-extension opcode. */
2879 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2880 case OP_LOADI1_MEMBASE:
2881 widen_op = OP_ICONV_TO_I1;
2883 case OP_LOADU1_MEMBASE:
2884 widen_op = OP_ICONV_TO_U1;
2886 case OP_LOADI2_MEMBASE:
2887 widen_op = OP_ICONV_TO_I2;
2889 case OP_LOADU2_MEMBASE:
2890 widen_op = OP_ICONV_TO_U2;
2896 if (widen_op != -1) {
2897 int dreg = alloc_preg (cfg);
2900 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2901 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Lazily resolve and cache the managed String.memcpy(3-arg) helper used for
 * valuetype copies. Aborts if the method is missing (old corlib).
 */
2911 get_memcpy_method (void)
2913 static MonoMethod *memcpy_method = NULL;
2914 if (!memcpy_method) {
2915 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2917 g_error ("Old corlib found. Install a new one");
2919 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Recursively compute a bitmap (one bit per pointer-sized slot, relative to
 * OFFSET) of the reference fields inside KLASS, used to drive per-slot write
 * barriers when copying a valuetype. Static fields are skipped; embedded
 * valuetypes with references recurse with an adjusted offset.
 */
2923 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2925 MonoClassField *field;
2926 gpointer iter = NULL;
2928 while ((field = mono_class_get_fields (klass, &iter))) {
2931 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; subtract it out. */
2933 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2934 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2935 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2936 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2938 MonoClass *field_class = mono_class_from_mono_type (field->type);
2939 if (field_class->has_references)
2940 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for storing VALUE through PTR. Prefers, in order:
 * the arch-specific card-table barrier opcode, inline card-table marking,
 * and finally a call to the generic managed write-barrier method. No-op when
 * write barriers are disabled for this compile.
 */
2946 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2948 int card_table_shift_bits;
2949 gpointer card_table_mask;
2951 MonoInst *dummy_use;
2952 int nursery_shift_bits;
2953 size_t nursery_size;
2954 gboolean has_card_table_wb = FALSE;
2956 if (!cfg->gen_write_barriers)
2959 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2961 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2963 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2964 has_card_table_wb = TRUE;
2967 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2970 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2971 wbarrier->sreg1 = ptr->dreg;
2972 wbarrier->sreg2 = value->dreg;
2973 MONO_ADD_INS (cfg->cbb, wbarrier);
2974 } else if (card_table) {
/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
2975 int offset_reg = alloc_preg (cfg);
2976 int card_reg = alloc_preg (cfg);
2979 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2980 if (card_table_mask)
2981 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2983 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2984 * IMM's larger than 32bits.
2986 if (cfg->compile_aot) {
2987 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2989 MONO_INST_NEW (cfg, ins, OP_PCONST);
2990 ins->inst_p0 = card_table;
2991 ins->dreg = card_reg;
2992 MONO_ADD_INS (cfg->cbb, ins);
2995 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2996 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2998 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2999 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier so the GC sees it as a root. */
3002 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Try to emit an inline, write-barrier-aware copy of SIZE bytes of KLASS
 * from iargs[1] to iargs[0]. Falls back (returns early) for small alignment,
 * very large types, or more than 5 pointer-sized stores — the latter goes
 * through the mono_gc_wbarrier_value_copy_bitmap icall instead.
 * NOTE(review): the visible extract omits the early-return statements
 * between the guard conditions; return-value semantics inferred, confirm
 * against the full source.
 */
3006 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3008 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3009 unsigned need_wb = 0;
3014 /*types with references can't have alignment smaller than sizeof(void*) */
3015 if (align < SIZEOF_VOID_P)
3018 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3019 if (size > 32 * SIZEOF_VOID_P)
3022 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3024 /* We don't unroll more than 5 stores to avoid code bloat. */
3025 if (size > 5 * SIZEOF_VOID_P) {
3026 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3027 size += (SIZEOF_VOID_P - 1);
3028 size &= ~(SIZEOF_VOID_P - 1);
3030 EMIT_NEW_ICONST (cfg, iargs [2], size);
3031 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3032 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3036 destreg = iargs [0]->dreg;
3037 srcreg = iargs [1]->dreg;
3040 dest_ptr_reg = alloc_preg (cfg);
3041 tmp_reg = alloc_preg (cfg);
/* dest_ptr walks the destination one pointer-slot at a time. */
3044 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3046 while (size >= SIZEOF_VOID_P) {
3047 MonoInst *load_inst;
3048 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3049 load_inst->dreg = tmp_reg;
3050 load_inst->inst_basereg = srcreg;
3051 load_inst->inst_offset = offset;
3052 MONO_ADD_INS (cfg->cbb, load_inst);
3054 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references (check elided in this extract). */
3057 emit_write_barrier (cfg, iargs [0], load_inst);
3059 offset += SIZEOF_VOID_P;
3060 size -= SIZEOF_VOID_P;
3063 /*tmp += sizeof (void*)*/
3064 if (size >= SIZEOF_VOID_P) {
3065 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3066 MONO_ADD_INS (cfg->cbb, iargs [0]);
3070 /* Those cannot be references since size < sizeof (void*) */
3072 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3073 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3079 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3080 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3086 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3087 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3096 * Emit code to copy a valuetype of type @klass whose address is stored in
3097 * @src->dreg to memory whose address is stored at @dest->dreg.
3100 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3102 MonoInst *iargs [4];
3103 int context_used, n;
3105 MonoMethod *memcpy_method;
3106 MonoInst *size_ins = NULL;
3107 MonoInst *memcpy_ins = NULL;
3111 * This check breaks with spilled vars... need to handle it during verification anyway.
3112 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime, via rgctx info. */
3115 if (mini_is_gsharedvt_klass (cfg, klass)) {
3117 context_used = mini_class_check_context_used (cfg, klass);
3118 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3119 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3123 n = mono_class_native_size (klass, &align);
3125 n = mono_class_value_size (klass, &align);
3127 /* if native is true there should be no references in the struct */
3128 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3129 /* Avoid barriers when storing to the stack */
3130 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3131 (dest->opcode == OP_LDADDR))) {
3137 context_used = mini_class_check_context_used (cfg, klass);
3139 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3140 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3142 } else if (context_used) {
3143 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3145 if (cfg->compile_aot) {
3146 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3148 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3149 mono_class_compute_gc_descriptor (klass);
3154 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3156 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small fixed-size copies are inlined; larger ones call the managed memcpy. */
3161 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3162 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3163 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3168 iargs [2] = size_ins;
3170 EMIT_NEW_ICONST (cfg, iargs [2], n);
3172 memcpy_method = get_memcpy_method ();
3174 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3176 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Lazily resolve and cache the managed String.memset(3-arg) helper used for
 * valuetype initialization. Aborts if the method is missing (old corlib).
 */
3181 get_memset_method (void)
3183 static MonoMethod *memset_method = NULL;
3184 if (!memset_method) {
3185 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3187 g_error ("Old corlib found. Install a new one");
3189 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code to zero-initialize a valuetype of KLASS at DEST->dreg.
 * gsharedvt types call a runtime bzero through the rgctx (size known only at
 * runtime); small fixed-size types are zeroed inline; everything else calls
 * the managed memset helper.
 */
3193 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3195 MonoInst *iargs [3];
3196 int n, context_used;
3198 MonoMethod *memset_method;
3199 MonoInst *size_ins = NULL;
3200 MonoInst *bzero_ins = NULL;
3201 static MonoMethod *bzero_method;
3203 /* FIXME: Optimize this for the case when dest is an LDADDR */
3205 mono_class_init (klass);
3206 if (mini_is_gsharedvt_klass (cfg, klass)) {
3207 context_used = mini_class_check_context_used (cfg, klass);
3208 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3209 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3211 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3212 g_assert (bzero_method);
3214 iargs [1] = size_ins;
3215 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3219 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized words: cheaper to emit the stores inline. */
3221 if (n <= sizeof (gpointer) * 5) {
3222 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3225 memset_method = get_memset_method ();
3227 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3228 EMIT_NEW_ICONST (cfg, iargs [2], n);
3229 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR to load the runtime generic context for METHOD. Depending on how
 * the context is used this is: the method-rgctx variable (generic methods),
 * the vtable variable (static/valuetype methods, possibly dereferencing the
 * mrgctx to reach its class_vtable), or the vtable loaded from `this`.
 */
3234 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3236 MonoInst *this = NULL;
3238 g_assert (cfg->generic_sharing_context);
3240 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3241 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3242 !method->klass->valuetype)
3243 EMIT_NEW_ARGLOAD (cfg, this, 0);
3245 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3246 MonoInst *mrgctx_loc, *mrgctx_var;
3249 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3251 mrgctx_loc = mono_get_vtable_var (cfg);
3252 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3255 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3256 MonoInst *vtable_loc, *vtable_var;
3260 vtable_loc = mono_get_vtable_var (cfg);
3261 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3263 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an mrgctx; load its embedded class vtable. */
3264 MonoInst *mrgctx_var = vtable_var;
3267 vtable_reg = alloc_preg (cfg);
3268 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3269 vtable_var->type = STACK_PTR;
/* Default case: read the vtable out of the `this` object header. */
3277 vtable_reg = alloc_preg (cfg);
3278 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from MP) an rgctx-entry patch descriptor: which method's context
 * it belongs to, whether the context lives in an mrgctx, the wrapped patch
 * (PATCH_TYPE/PATCH_DATA) and the requested INFO_TYPE.
 */
3283 static MonoJumpInfoRgctxEntry *
3284 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3286 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3287 res->method = method;
3288 res->in_mrgctx = in_mrgctx;
3289 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3290 res->data->type = patch_type;
3291 res->data->data.target = patch_data;
3292 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY from
 * the runtime generic context RGCTX at execution time.
 */
3297 static inline MonoInst*
3298 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3300 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to fetch the RGCTX_TYPE info slot for KLASS from the runtime
 * generic context of the current method.
 */
3304 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3305 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3307 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3308 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3310 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 * Emit IR to fetch the RGCTX_TYPE info slot for signature SIG from the
 * runtime generic context of the current method.
 */
3314 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3315 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3317 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3318 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3320 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 * Emit IR to fetch the RGCTX_TYPE info slot for a gsharedvt call described
 * by (SIG, CMETHOD) from the runtime generic context of the current method.
 */
3324 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3325 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3327 MonoJumpInfoGSharedVtCall *call_info;
3328 MonoJumpInfoRgctxEntry *entry;
3331 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3332 call_info->sig = sig;
3333 call_info->method = cmethod;
3335 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3336 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3338 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 * Emit IR to fetch the gsharedvt per-method INFO structure for CMETHOD from
 * the runtime generic context of the current method.
 */
3343 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3344 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3346 MonoJumpInfoRgctxEntry *entry;
3349 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3350 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3352 return emit_rgctx_fetch (cfg, rgctx, entry);
3356 * emit_get_rgctx_method:
3358 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3359 * normal constants, else emit a load from the rgctx.
3362 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3363 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared code: the value is known at compile time, emit it as a constant. */
3365 if (!context_used) {
3368 switch (rgctx_type) {
3369 case MONO_RGCTX_INFO_METHOD:
3370 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3372 case MONO_RGCTX_INFO_METHOD_RGCTX:
3373 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3376 g_assert_not_reached ();
3379 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3380 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3382 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to fetch the RGCTX_TYPE info slot for FIELD from the runtime
 * generic context of the current method.
 */
3387 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3388 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3390 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3391 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3393 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 * Return the index of the gsharedvt-info entry matching (DATA, RGCTX_TYPE),
 * reusing an existing entry when possible (except for LOCAL_OFFSET entries,
 * which are never shared) and growing the entry array on demand.
 */
3397 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3399 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3400 MonoRuntimeGenericContextInfoTemplate *template;
3405 for (i = 0; i < info->num_entries; ++i) {
3406 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3408 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry array (doubling, starting at 16) when it is full. */
3412 if (info->num_entries == info->count_entries) {
3413 MonoRuntimeGenericContextInfoTemplate *new_entries;
3414 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3416 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3418 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3419 info->entries = new_entries;
3420 info->count_entries = new_count_entries;
3423 idx = info->num_entries;
3424 template = &info->entries [idx];
3425 template->info_type = rgctx_type;
3426 template->data = data;
3428 info->num_entries ++;
3434 * emit_get_gsharedvt_info:
3436 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3439 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3444 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3445 /* Load info->entries [idx] */
3446 dreg = alloc_preg (cfg);
3447 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 * Convenience wrapper: fetch a gsharedvt info entry keyed on KLASS's type.
 */
3453 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3455 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3459 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic class-init trampoline for KLASS. The vtable
 * argument comes from the rgctx when the class depends on a generic context,
 * otherwise from a compile-time vtable constant.
 */
3462 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3464 MonoInst *vtable_arg;
3468 context_used = mini_class_check_context_used (cfg, klass);
3471 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3472 klass, MONO_RGCTX_INFO_VTABLE);
3474 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3478 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3481 if (COMPILE_LLVM (cfg))
3482 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3484 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3485 #ifdef MONO_ARCH_VTABLE_REG
/* Pin the vtable argument to the dedicated vtable register expected by the trampoline. */
3486 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3487 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 * Emit a debugger sequence point at IL offset IP when sequence points are
 * enabled and we are compiling METHOD itself (not an inlined callee).
 * INTR_LOC marks an interruptible location; NONEMPTY_STACK flags that the
 * evaluation stack is not empty at this point.
 */
3494 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3498 if (cfg->gen_seq_points && cfg->method == method) {
3499 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3501 ins->flags |= MONO_INST_NONEMPTY_STACK;
3502 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * With --debug=casts, record the source and target classes of a pending cast
 * in the JIT TLS data so a failing cast can produce a detailed message.
 * When NULL_CHECK is set, the bookkeeping is skipped for null objects.
 * OUT_BBLOCK, if non-NULL, receives the current basic block on exit.
 */
3507 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3509 if (mini_get_debug_options ()->better_cast_details) {
3510 int to_klass_reg = alloc_preg (cfg);
3511 int vtable_reg = alloc_preg (cfg);
3512 int klass_reg = alloc_preg (cfg);
3513 MonoBasicBlock *is_null_bb = NULL;
3517 NEW_BBLOCK (cfg, is_null_bb);
3519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3523 tls_get = mono_get_jit_tls_intrinsic (cfg);
3525 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3529 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass; class_cast_to = klass. */
3530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3531 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3533 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3534 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3535 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3538 MONO_START_BB (cfg, is_null_bb);
3540 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 * Clear the saved cast bookkeeping (set by save_cast_details) once the cast
 * has succeeded; resetting the `from` field alone is sufficient.
 */
3546 reset_cast_details (MonoCompile *cfg)
3548 /* Reset the variables holding the cast details */
3549 if (mini_get_debug_options ()->better_cast_details) {
3550 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3552 MONO_ADD_INS (cfg->cbb, tls_get);
3553 /* It is enough to reset the from field */
3554 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3559 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 * Emit a runtime check that OBJ's exact type is ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. Under MONO_OPT_SHARED the klass is
 * compared; under generic sharing the vtable comes from the rgctx; otherwise
 * the vtable is compared against a (possibly AOT) constant.
 */
3562 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3564 int vtable_reg = alloc_preg (cfg);
3567 context_used = mini_class_check_context_used (cfg, array_class);
3569 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
3571 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3573 if (cfg->opt & MONO_OPT_SHARED) {
3574 int class_reg = alloc_preg (cfg);
3575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3576 if (cfg->compile_aot) {
3577 int klass_reg = alloc_preg (cfg);
3578 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3579 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3583 } else if (context_used) {
3584 MonoInst *vtable_ins;
3586 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3587 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3589 if (cfg->compile_aot) {
3593 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3595 vt_reg = alloc_preg (cfg);
3596 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3600 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3606 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3608 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox to unbox @val (an object reference)
 * into a Nullable<T> value.  With @context_used != 0 the method address is
 * fetched from the RGCTX (shared generic code path) and invoked via calli;
 * otherwise a direct call is emitted, passing the vtable as the extra
 * argument when method sharing requires it.
 * Returns the MonoInst holding the call result.
 */
3612  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3613  * generic code is generated.
3616 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3618 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3621 MonoInst *rgctx, *addr;
3623 /* FIXME: What if the class is shared? We might not
3624 have to get the address of the method from the
/* Shared path: resolve the Unbox method's code address through the RGCTX
 * and call it indirectly. */
3626 addr = emit_get_rgctx_method (cfg, context_used, method,
3627 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3629 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3631 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, possibly with the vtable as hidden arg. */
3633 gboolean pass_vtable, pass_mrgctx;
3634 MonoInst *rgctx_arg = NULL;
3636 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3637 g_assert (!pass_mrgctx);
3640 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3643 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3646 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR for the 'unbox' CIL opcode: verify that sp [0] is a boxed
 * instance of @klass (throwing InvalidCastException otherwise) and return
 * an instruction producing the address of the unboxed value, i.e.
 * obj + sizeof (MonoObject).  With @context_used != 0 the element class is
 * fetched from the RGCTX for shared generic code.
 */
3651 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3655 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3656 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3657 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3658 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3660 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check on the object. */
3661 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3662 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3664 /* FIXME: generics */
3665 g_assert (klass->rank == 0);
/* Boxed valuetypes are never arrays: rank must be 0 or the cast fails. */
3668 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3669 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class resolved through
 * the RGCTX at run time. */
3675 MonoInst *element_class;
3677 /* This assertion is from the unboxcast insn */
3678 g_assert (klass->rank == 0);
3680 element_class = emit_get_rgctx_klass (cfg, context_used,
3681 klass->element_class, MONO_RGCTX_INFO_KLASS);
3683 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3684 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: direct class check with cast-details bookkeeping. */
3686 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3687 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3688 reset_cast_details (cfg);
/* Result: pointer to the value embedded right after the object header. */
3691 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3692 MONO_ADD_INS (cfg->cbb, add);
3693 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox @obj when @klass is a gsharedvt (generic-shared
 * valuetype) class whose concrete layout is only known at run time.
 * Branches at run time on MONO_RGCTX_INFO_CLASS_BOX_TYPE:
 *   - value type (default): address is obj + sizeof (MonoObject);
 *   - reference type (== 1): the ref is spilled to a temporary and its
 *     address is used;
 *   - nullable (== 2): Nullable<T>.Unbox is invoked through an RGCTX
 *     resolved address via calli.
 * All paths converge in end_bb; the result is a load through addr_reg.
 * *out_cbb receives the basic block the caller should continue in.
 */
3700 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3702 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3703 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3707 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3713 args [1] = klass_inst;
/* Run-time type check: throws on mismatch, returns the object. */
3716 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3718 NEW_BBLOCK (cfg, is_ref_bb);
3719 NEW_BBLOCK (cfg, is_nullable_bb);
3720 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the box-type descriptor: 1 = reference, 2 = nullable,
 * anything else falls through to the plain valuetype path. */
3721 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3723 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3726 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3728 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3729 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Valuetype path: value lives right after the MonoObject header. */
3733 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3734 MONO_ADD_INS (cfg->cbb, addr);
3736 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3739 MONO_START_BB (cfg, is_ref_bb);
3741 /* Save the ref to a temporary */
3742 dreg = alloc_ireg (cfg);
3743 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3744 addr->dreg = addr_reg;
3745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3746 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3749 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable<T>.Unbox through an RGCTX-resolved
 * address.  The signature cannot be built from metadata at JIT time for
 * a gsharedvt T, so it is assembled by hand here. */
3752 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3753 MonoInst *unbox_call;
3754 MonoMethodSignature *unbox_sig;
3757 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3759 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3760 unbox_sig->ret = &klass->byval_arg;
3761 unbox_sig->param_count = 1;
3762 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3763 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3765 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3766 addr->dreg = addr_reg;
3769 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3772 MONO_START_BB (cfg, end_bb);
/* All paths left the value's address in addr_reg; load through it. */
3775 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3777 *out_cbb = cfg->cbb;
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new instance of @klass.  @for_box selects the
 * boxing-specialized allocator where the GC provides one.  The allocation
 * strategy depends on the compilation mode:
 *   - generic-shared (@context_used): class/vtable comes from the RGCTX;
 *   - MONO_OPT_SHARED: allocate via mono_object_new (domain, class);
 *   - AOT out-of-line corlib types: specialized mscorlib helper to avoid
 *     relocations;
 *   - default: vtable constant + GC managed allocator or icall.
 * Returns the MonoInst producing the object reference, or NULL with the
 * cfg exception set on error.
 */
3786 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3788 MonoInst *iargs [2];
3794 MonoInst *iargs [2];
3796 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* Shared code passes the MonoClass, otherwise the MonoVTable, as the
 * RGCTX-resolved allocation argument. */
3798 if (cfg->opt & MONO_OPT_SHARED)
3799 rgctx_info = MONO_RGCTX_INFO_KLASS;
3801 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3802 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3804 if (cfg->opt & MONO_OPT_SHARED) {
3805 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3807 alloc_ftn = mono_object_new;
3810 alloc_ftn = mono_object_new_specific;
3813 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3814 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3816 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3819 if (cfg->opt & MONO_OPT_SHARED) {
3820 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3821 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3823 alloc_ftn = mono_object_new;
3824 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3825 /* This happens often in argument checking code, eg. throw new FooException... */
3826 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3827 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3828 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3830 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3831 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg. */
3835 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3836 cfg->exception_ptr = klass;
/* Managed allocators are unavailable when cross-compiling. */
3840 #ifndef MONO_CROSS_COMPILE
3841 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3844 if (managed_alloc) {
3845 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3846 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3848 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words as the
 * first argument (pass_lw); round the byte size up to whole words. */
3850 guint32 lw = vtable->klass->instance_size;
3851 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3852 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3853 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3856 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3860 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR for the 'box' CIL opcode, boxing @val of type @klass.
 * Special cases:
 *   - Nullable<T>: call Nullable<T>.Box (directly, or via RGCTX calli in
 *     shared generic code);
 *   - gsharedvt klass: run-time dispatch on the box type — plain vtype
 *     (allocate + store), reference type (no boxing, value passed through),
 *     or nullable (hand-built calli to Nullable<T>.Box);
 *   - default: allocate and store the value after the object header.
 * Returns the MonoInst holding the boxed object, or NULL with the cfg
 * exception set on error.  *out_cbb receives the continuation bblock.
 */
3867 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3869 MonoInst *alloc, *ins;
3871 *out_cbb = cfg->cbb;
3873 if (mono_class_is_nullable (klass)) {
3874 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3877 /* FIXME: What if the class is shared? We might not
3878 have to get the method address from the RGCTX. */
3879 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3880 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3881 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3883 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared nullable: direct call, passing vtable when required. */
3885 gboolean pass_vtable, pass_mrgctx;
3886 MonoInst *rgctx_arg = NULL;
3888 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3889 g_assert (!pass_mrgctx);
3892 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3895 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3898 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3902 if (mini_is_gsharedvt_klass (cfg, klass)) {
3903 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3904 MonoInst *res, *is_ref, *src_var, *addr;
3907 dreg = alloc_ireg (cfg);
3909 NEW_BBLOCK (cfg, is_ref_bb);
3910 NEW_BBLOCK (cfg, is_nullable_bb);
3911 NEW_BBLOCK (cfg, end_bb);
/* Same box-type dispatch as handle_unbox_gsharedvt: 1 = reference,
 * 2 = nullable, otherwise plain valuetype. */
3912 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3914 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3916 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3917 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Valuetype path: allocate the box and copy the value into it. */
3920 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3923 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3924 ins->opcode = OP_STOREV_MEMBASE;
3926 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3927 res->type = STACK_OBJ;
3929 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3932 MONO_START_BB (cfg, is_ref_bb);
3933 addr_reg = alloc_ireg (cfg);
3935 /* val is a vtype, so has to load the value manually */
3936 src_var = get_vreg_to_inst (cfg, val->dreg);
3938 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3939 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3941 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3944 MONO_START_BB (cfg, is_nullable_bb);
3947 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3948 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3950 MonoMethodSignature *box_sig;
3953 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3954 * construct that method at JIT time, so have to do things by hand.
3956 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3957 box_sig->ret = &mono_defaults.object_class->byval_arg;
3958 box_sig->param_count = 1;
3959 box_sig->params [0] = &klass->byval_arg;
3960 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3961 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3962 res->type = STACK_OBJ;
3966 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3968 MONO_START_BB (cfg, end_bb);
3970 *out_cbb = cfg->cbb;
/* Non-gsharedvt default path: allocate and store the value. */
3974 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3978 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether @klass is a generic instantiation (or, with
 * @context_used, an open generic) that has at least one covariant or
 * contravariant type parameter instantiated with a reference type.
 * Such classes need the slow cast paths, since variance makes the
 * simple class-pointer checks insufficient.
 */
3985 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3988 MonoGenericContainer *container;
3989 MonoGenericInst *ginst;
3991 if (klass->generic_class) {
3992 container = klass->generic_class->container_class->generic_container;
3993 ginst = klass->generic_class->context.class_inst;
3994 } else if (klass->generic_container && context_used) {
3995 container = klass->generic_container;
3996 ginst = container->context.class_inst;
/* Scan the type arguments of the (co/contra)variant parameters only. */
4001 for (i = 0; i < container->type_argc; ++i) {
4003 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4005 type = ginst->type_argv [i];
4006 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 *   True when @klass needs the generic (cached icall) isinst/castclass
 * path rather than an inline check.  Note the leading TRUE || makes this
 * unconditionally true for now — the fast inline path is disabled, see
 * the FIXME below.
 */
4012 // FIXME: This doesn't work yet (class libs tests fail?)
4013 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper, which
 * performs the cast using a per-call-site cache entry (@args).  Cast
 * details are saved before the call so a failure is reported with the
 * involved classes, and reset afterwards.  Returns the call result.
 */
4016 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4018 MonoMethod *mono_castclass;
4021 mono_castclass = mono_marshal_get_castclass_with_cache ();
4023 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4024 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4025 reset_cast_details (cfg);
/*
 * handle_castclass:
 *
 *   Emit IR for the 'castclass' CIL opcode: check that @src is null or an
 * instance of @klass, throwing InvalidCastException otherwise, and leave
 * the (unchanged) reference as the result.  Complex cases (variant
 * generics, interfaces, arrays, nullable, etc. — currently all cases, see
 * is_complex_isinst) go through the cached castclass wrapper; otherwise
 * an inline vtable/class-pointer check is emitted.
 * Returns NULL and sets the cfg exception on error.
 */
4034 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4036 MonoBasicBlock *is_null_bb;
4037 int obj_reg = src->dreg;
4038 int vtable_reg = alloc_preg (cfg);
4039 MonoInst *klass_inst = NULL;
4044 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4045 MonoInst *cache_ins;
4047 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4052 /* klass - it's the second element of the cache entry*/
4053 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4056 args [2] = cache_ins;
4058 return emit_castclass_with_cache (cfg, klass, args, NULL);
/* Shared generic code: the expected class comes from the RGCTX. */
4061 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4064 NEW_BBLOCK (cfg, is_null_bb);
/* null always passes castclass. */
4066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4067 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4069 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4071 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4072 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4073 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4075 int klass_reg = alloc_preg (cfg);
4077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single pointer compare suffices (vtable or
 * class pointer, depending on the disabled remoting-safe variant). */
4079 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4080 /* the remoting code is broken, access the class for now */
4081 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4082 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4084 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4085 cfg->exception_ptr = klass;
4088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4090 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4091 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4093 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy in mini_emit_castclass_inst. */
4095 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4096 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4100 MONO_START_BB (cfg, is_null_bb);
4102 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for the 'isinst' CIL opcode: result is @src if it is null or
 * an instance of @klass, otherwise null.  Complex cases go through the
 * cached isinst wrapper; otherwise inline checks are emitted, specialized
 * by the shape of @klass (interface, array, nullable, sealed, general).
 * The result register is pre-assigned the input so the "success" blocks
 * need no extra copy (enables if-conversion); false_bb overwrites it
 * with 0.  Returns NULL and sets the cfg exception on error.
 */
4111 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4114 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4115 int obj_reg = src->dreg;
4116 int vtable_reg = alloc_preg (cfg);
4117 int res_reg = alloc_ireg_ref (cfg);
4118 MonoInst *klass_inst = NULL;
4123 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4124 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4125 MonoInst *cache_ins;
4127 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4132 /* klass - it's the second element of the cache entry*/
4133 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4136 args [2] = cache_ins;
4138 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4141 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4144 NEW_BBLOCK (cfg, is_null_bb);
4145 NEW_BBLOCK (cfg, false_bb);
4146 NEW_BBLOCK (cfg, end_bb);
4148 /* Do the assignment at the beginning, so the other assignment can be if converted */
4149 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4150 ins->type = STACK_OBJ;
/* isinst (null) yields null: jump straight to the pass-through block. */
4153 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4154 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4156 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4158 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4159 g_assert (!context_used);
4160 /* the is_null_bb target simply copies the input register to the output */
4161 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4163 int klass_reg = alloc_preg (cfg);
/* Array case: rank must match, then the element (cast) class is
 * compared, with special handling for enums and interfaces. */
4166 int rank_reg = alloc_preg (cfg);
4167 int eclass_reg = alloc_preg (cfg);
4169 g_assert (!context_used);
4170 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4172 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4173 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4174 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
4175 if (klass->cast_class == mono_defaults.object_class) {
4176 int parent_reg = alloc_preg (cfg);
4177 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent))
;
4178 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4179 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4180 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4181 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4182 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4183 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4184 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4185 } else if (klass->cast_class == mono_defaults.enum_class) {
4186 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4187 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4188 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4189 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4191 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4192 /* Check that the object is a vector too */
4193 int bounds_reg = alloc_preg (cfg);
4194 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4195 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4196 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4199 /* the is_null_bb target simply copies the input register to the output */
4200 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4202 } else if (mono_class_is_nullable (klass)) {
4203 g_assert (!context_used);
4204 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4205 /* the is_null_bb target simply copies the input register to the output */
4206 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: a single equality check against the known class. */
4208 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4209 g_assert (!context_used);
4210 /* the remoting code is broken, access the class for now */
4211 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4212 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4214 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4215 cfg->exception_ptr = klass;
4218 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4220 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4224 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: hierarchy walk via mini_emit_isninst_cast_inst. */
4226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4227 /* the is_null_bb target simply copies the input register to the output */
4228 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result is null. */
4233 MONO_START_BB (cfg, false_bb);
4235 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4236 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4238 MONO_START_BB (cfg, is_null_bb);
4240 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant (CEE_MONO_CISINST).
 * The result encoding is described in the comment below: 0/1/2 in an
 * integer register.  The transparent-proxy paths are compiled out when
 * DISABLE_REMOTING is defined (and then only interface/class checks with
 * outcomes 0 and 1 remain; hitting a proxy aborts via g_error).
 */
4246 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4248 /* This opcode takes as input an object reference and a class, and returns:
4249 0) if the object is an instance of the class,
4250 1) if the object is not instance of the class,
4251 2) if the object is a proxy whose type cannot be determined */
4254 #ifndef DISABLE_REMOTING
4255 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4257 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4259 int obj_reg = src->dreg;
4260 int dreg = alloc_ireg (cfg);
4262 #ifndef DISABLE_REMOTING
4263 int klass_reg = alloc_preg (cfg);
4266 NEW_BBLOCK (cfg, true_bb);
4267 NEW_BBLOCK (cfg, false_bb);
4268 NEW_BBLOCK (cfg, end_bb);
4269 #ifndef DISABLE_REMOTING
4270 NEW_BBLOCK (cfg, false2_bb);
4271 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
4274 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4275 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4277 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4278 #ifndef DISABLE_REMOTING
4279 NEW_BBLOCK (cfg, interface_fail_bb);
4282 tmp_reg = alloc_preg (cfg);
4283 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4284 #ifndef DISABLE_REMOTING
/* Interface check failed: if the object is a transparent proxy with
 * custom type info, report "undecidable" (2) via false2_bb. */
4285 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4286 MONO_START_BB (cfg, interface_fail_bb);
4287 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4289 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4291 tmp_reg = alloc_preg (cfg);
4292 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4293 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4294 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4296 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4299 #ifndef DISABLE_REMOTING
/* Non-interface case: distinguish proxies from ordinary objects.
 * For proxies, test against the remote class's proxy_class; a proxy
 * with custom type info yields result 2. */
4300 tmp_reg = alloc_preg (cfg);
4301 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4302 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4304 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4305 tmp_reg = alloc_preg (cfg);
4306 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4307 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4309 tmp_reg = alloc_preg (cfg);
4310 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4311 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4312 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4314 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4315 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4317 MONO_START_BB (cfg, no_proxy_bb);
4319 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4321 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Materialize the 0/1/2 result and converge in end_bb. */
4325 MONO_START_BB (cfg, false_bb);
4327 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4328 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4330 #ifndef DISABLE_REMOTING
4331 MONO_START_BB (cfg, false2_bb);
4333 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4334 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4337 MONO_START_BB (cfg, true_bb);
4339 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4341 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST carrying the result register and stack type. */
4344 MONO_INST_NEW (cfg, ins, OP_ICONST);
4346 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant (CEE_MONO_CCASTCLASS).
 * Result encoding is described in the comment below: 0 = instance of
 * @klass (or null), 1 = proxy whose type cannot be decided; any other
 * outcome throws InvalidCastException.  Proxy handling is compiled out
 * when DISABLE_REMOTING is defined (hitting a proxy then aborts via
 * g_error).
 */
4352 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4354 /* This opcode takes as input an object reference and a class, and returns:
4355 0) if the object is an instance of the class,
4356 1) if the object is a proxy whose type cannot be determined
4357 an InvalidCastException exception is thrown otherwhise*/
4360 #ifndef DISABLE_REMOTING
4361 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4363 MonoBasicBlock *ok_result_bb;
4365 int obj_reg = src->dreg;
4366 int dreg = alloc_ireg (cfg);
4367 int tmp_reg = alloc_preg (cfg);
4369 #ifndef DISABLE_REMOTING
4370 int klass_reg = alloc_preg (cfg);
4371 NEW_BBLOCK (cfg, end_bb);
4374 NEW_BBLOCK (cfg, ok_result_bb);
/* null always passes the cast (result 0). */
4376 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4377 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4379 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4381 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4382 #ifndef DISABLE_REMOTING
4383 NEW_BBLOCK (cfg, interface_fail_bb);
4385 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a transparent proxy with custom type
 * info survives (result 1); anything else throws. */
4386 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4387 MONO_START_BB (cfg, interface_fail_bb);
4388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4390 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4392 tmp_reg = alloc_preg (cfg);
4393 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4394 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4395 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4397 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4401 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4402 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4405 #ifndef DISABLE_REMOTING
/* Non-interface case: distinguish proxies (checked against their
 * remote proxy_class, possibly undecidable → result 1) from ordinary
 * objects (plain castclass that throws on mismatch). */
4406 NEW_BBLOCK (cfg, no_proxy_bb);
4408 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4409 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4410 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4412 tmp_reg = alloc_preg (cfg);
4413 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4414 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4416 tmp_reg = alloc_preg (cfg);
4417 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4421 NEW_BBLOCK (cfg, fail_1_bb);
4423 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4425 MONO_START_BB (cfg, fail_1_bb);
4427 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4428 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4430 MONO_START_BB (cfg, no_proxy_bb);
4432 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4434 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4438 MONO_START_BB (cfg, ok_result_bb);
4440 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4442 #ifndef DISABLE_REMOTING
4443 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST carrying the result register and stack type. */
4447 MONO_INST_NEW (cfg, ins, OP_ICONST);
4449 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type @klass bound to @method
 * with @target as the receiver, inlining the work mono_delegate_ctor ()
 * would do: set the target and method fields (with write barriers when
 * enabled), optionally install a per-domain code slot for the compiled
 * method, and set invoke_impl to the delegate trampoline.
 * Returns NULL and sets the cfg exception on error.
 */
4457 static G_GNUC_UNUSED MonoInst*
4458 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4462 gpointer *trampoline;
4463 MonoInst *obj, *method_ins, *tramp_ins;
4467 obj = handle_alloc (cfg, klass, FALSE, 0);
4471 /* Inline the contents of mono_delegate_ctor */
4473 /* Set target field */
4474 /* Optimize away setting of NULL target */
4475 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4476 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The target is a heap reference stored into a heap object: needs a
 * write barrier with generational/concurrent GCs. */
4477 if (cfg->gen_write_barriers) {
4478 dreg = alloc_preg (cfg);
4479 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4480 emit_write_barrier (cfg, ptr, target);
4484 /* Set method field */
4485 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4486 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4487 if (cfg->gen_write_barriers) {
4488 dreg = alloc_preg (cfg);
4489 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4490 emit_write_barrier (cfg, ptr, method_ins);
4493 * To avoid looking up the compiled code belonging to the target method
4494 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4495 * store it, and we fill it after the method has been compiled.
4497 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4498 MonoInst *code_slot_ins;
4501 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash; guarded by
 * the domain lock since other compilations may touch it. */
4503 domain = mono_domain_get ();
4504 mono_domain_lock (domain);
4505 if (!domain_jit_info (domain)->method_code_hash)
4506 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4507 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4509 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4510 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4512 mono_domain_unlock (domain);
4514 if (cfg->compile_aot)
4515 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4517 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4519 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4522 /* Set invoke_impl field */
4523 if (cfg->compile_aot) {
/* AOT: the trampoline is resolved at load time through a patch info
 * describing the (klass, method) pair. */
4524 MonoClassMethodPair *del_tramp;
4526 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
4527 del_tramp->klass = klass;
4528 del_tramp->method = context_used ? NULL : method;
4529 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4531 trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
4532 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4534 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4536 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the rank-specific mono_array_new_va icall wrapper;
 * SP holds the dimension arguments from the IL stack. The icall uses a vararg
 * calling convention, so the method is flagged MONO_CFG_HAS_VARARGS and LLVM
 * compilation is disabled. Returns the call instruction.
 */
4542 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4544 MonoJitICallInfo *info;
4546 /* Need to register the icall so it gets an icall wrapper */
4547 info = mono_get_array_new_va_icall (rank);
4549 cfg->flags |= MONO_CFG_HAS_VARARGS;
4551 /* mono_array_new_va () needs a vararg calling convention */
4552 cfg->disable_llvm = TRUE;
4554 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4555 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by inserting an
 * OP_LOAD_GOTADDR at the very start of the entry basic block (so the GOT
 * register is valid before any instruction that may need it), then add a
 * dummy use in bb_exit so liveness analysis keeps got_var alive for the
 * whole method. No-op if there is no got_var or it was already allocated.
 */
4559 mono_emit_load_got_addr (MonoCompile *cfg)
4561 MonoInst *getaddr, *dummy_use;
4563 if (!cfg->got_var || cfg->got_var_allocated)
4566 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4567 getaddr->cil_code = cfg->header->code;
4568 getaddr->dreg = cfg->got_var->dreg;
4570 /* Add it to the start of the first bblock */
4571 if (cfg->bb_entry->code) {
4572 getaddr->next = cfg->bb_entry->code;
4573 cfg->bb_entry->code = getaddr;
/* Empty entry block: append normally. */
4576 MONO_ADD_INS (cfg->bb_entry, getaddr);
4578 cfg->got_var_allocated = TRUE;
4581 * Add a dummy use to keep the got_var alive, since real uses might
4582 * only be generated by the back ends.
4583 * Add it to end_bblock, so the variable's lifetime covers the whole
4585 * It would be better to make the usage of the got var explicit in all
4586 * cases when the backend needs it (i.e. calls, throw etc.), so this
4587 * wouldn't be needed.
4589 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4590 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * Cached inline code-size limit. Initialized lazily in
 * mono_method_check_inlining () from the MONO_INLINELIMIT environment
 * variable, falling back to INLINE_LENGTH_LIMIT.
 */
4593 static int inline_limit;
4594 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG. Rejects: generic sharing contexts, inline depth > 10, methods whose
 * header summary cannot be obtained, NoInlining/synchronized methods,
 * MarshalByRef classes, bodies over the size limit (unless marked
 * AggressiveInlining), classes whose cctor would have to run inside the
 * inlined code, and methods with declarative security (CAS). On soft-float
 * targets, also rejects R4 parameters/returns.
 */
4597 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4599 MonoMethodHeaderSummary header;
4601 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4602 MonoMethodSignature *sig = mono_method_signature (method);
4606 if (cfg->generic_sharing_context)
4609 if (cfg->inline_depth > 10)
4612 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* Icalls/pinvokes can be "inlined" as LMF ops on some architectures. */
4613 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4614 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4615 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4620 if (!mono_method_get_header_summary (method, &header))
4623 /*runtime, icall and pinvoke are checked by summary call*/
4624 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4625 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4626 (mono_class_is_marshalbyref (method->klass)) ||
4630 /* also consider num_locals? */
4631 /* Do the size check early to avoid creating vtables */
4632 if (!inline_limit_inited) {
4633 if (g_getenv ("MONO_INLINELIMIT"))
4634 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4636 inline_limit = INLINE_LENGTH_LIMIT;
4637 inline_limit_inited = TRUE;
4639 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4643 * if we can initialize the class of the method right away, we do,
4644 * otherwise we don't allow inlining if the class needs initialization,
4645 * since it would mean inserting a call to mono_runtime_class_init()
4646 * inside the inlined code
4648 if (!(cfg->opt & MONO_OPT_SHARED)) {
4649 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4650 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4651 vtable = mono_class_vtable (cfg->domain, method->klass);
4654 if (!cfg->compile_aot)
4655 mono_runtime_class_init (vtable);
4656 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4657 if (cfg->run_cctors && method->klass->has_cctor) {
4658 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4659 if (!method->klass->runtime_info)
4660 /* No vtable created yet */
4662 vtable = mono_class_vtable (cfg->domain, method->klass);
4665 /* This makes so that inline cannot trigger */
4666 /* .cctors: too many apps depend on them */
4667 /* running with a specific order... */
4668 if (! vtable->initialized)
4670 mono_runtime_class_init (vtable);
4672 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4673 if (!method->klass->runtime_info)
4674 /* No vtable created yet */
4676 vtable = mono_class_vtable (cfg->domain, method->klass);
4679 if (!vtable->initialized)
/* MONO_OPT_SHARED path: cannot run the cctor eagerly here. */
4684 * If we're compiling for shared code
4685 * the cctor will need to be run at aot method load time, for example,
4686 * or at the end of the compilation of the inlining method.
4688 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4693 * CAS - do not inline methods with declarative security
4694 * Note: this has to be before any possible return TRUE;
4696 if (mono_security_method_has_declsec (method))
4699 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: R4 params/returns need decomposition, so refuse to inline. */
4700 if (mono_arch_is_soft_float ()) {
4702 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4704 for (i = 0; i < sig->param_count; ++i)
4705 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD on KLASS requires an
 * explicit class-initialization check at runtime. Skipped when the vtable is
 * already initialized (JIT only), for BeforeFieldInit classes accessed from
 * their own cctor context, when the class needs no cctor at all, or for
 * instance methods of the class itself (the cctor already ran before the
 * method could be called).
 */
4714 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4716 if (!cfg->compile_aot) {
4718 if (vtable->initialized)
4722 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4723 if (cfg->method == method)
4727 if (!mono_class_needs_cctor_run (klass, method))
4730 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4731 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the 1-dimensional
 * array ARR of element type KLASS. Optionally emits a bounds check (BCHECK).
 * On x86/amd64, power-of-two element sizes use a single LEA; otherwise the
 * address is index * element_size + offsetof(MonoArray, vector). For
 * gsharedvt variable-size classes the element size comes from the rgctx at
 * runtime. Returns an instruction of type STACK_MP.
 */
4738 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4742 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4745 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4748 mono_class_init (klass);
4749 size = mono_class_array_element_size (klass);
4752 mult_reg = alloc_preg (cfg);
4753 array_reg = arr->dreg;
4754 index_reg = index->dreg;
4756 #if SIZEOF_REGISTER == 8
4757 /* The array reg is 64 bits but the index reg is only 32 */
4758 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the extension itself. */
4760 index2_reg = index_reg;
4762 index2_reg = alloc_preg (cfg);
4763 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32 bit registers: truncate an I8 index to I4. */
4766 if (index->type == STACK_I8) {
4767 index2_reg = alloc_preg (cfg);
4768 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4770 index2_reg = index_reg;
4775 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4777 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale into a single LEA. */
4778 if (size == 1 || size == 2 || size == 4 || size == 8) {
4779 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4781 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4782 ins->klass = mono_class_get_element_class (klass);
4783 ins->type = STACK_MP;
4789 add_reg = alloc_ireg_mp (cfg);
4792 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, fetch from rgctx. */
4795 g_assert (cfg->generic_sharing_context);
4796 context_used = mini_class_check_context_used (cfg, klass);
4797 g_assert (context_used);
4798 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4799 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4801 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4803 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4804 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4805 ins->klass = mono_class_get_element_class (klass);
4806 ins->type = STACK_MP;
4807 MONO_ADD_INS (cfg->cbb, ins);
4812 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a rank-2 array with
 * explicit bounds: loads lower_bound/length for both dimensions from the
 * MonoArrayBounds table, range-checks each real index (raising
 * IndexOutOfRangeException), then computes
 *   (realidx1 * length2 + realidx2) * element_size + offsetof(MonoArray, vector).
 * Only available when the architecture does not emulate mul/div.
 */
4814 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4816 int bounds_reg = alloc_preg (cfg);
4817 int add_reg = alloc_ireg_mp (cfg);
4818 int mult_reg = alloc_preg (cfg);
4819 int mult2_reg = alloc_preg (cfg);
4820 int low1_reg = alloc_preg (cfg);
4821 int low2_reg = alloc_preg (cfg);
4822 int high1_reg = alloc_preg (cfg);
4823 int high2_reg = alloc_preg (cfg);
4824 int realidx1_reg = alloc_preg (cfg);
4825 int realidx2_reg = alloc_preg (cfg);
4826 int sum_reg = alloc_preg (cfg);
4827 int index1, index2, tmpreg;
4831 mono_class_init (klass);
4832 size = mono_class_array_element_size (klass);
4834 index1 = index_ins1->dreg;
4835 index2 = index_ins2->dreg;
4837 #if SIZEOF_REGISTER == 8
4838 /* The array reg is 64 bits but the index reg is only 32 */
4839 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes to pointer width. */
4842 tmpreg = alloc_preg (cfg);
4843 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4845 tmpreg = alloc_preg (cfg);
4846 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4850 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4854 /* range checking */
4855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4856 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4858 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4859 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4860 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4861 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4862 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4863 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4864 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds entry lives sizeof (MonoArrayBounds) further on. */
4866 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4867 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4868 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4869 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4870 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4871 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4872 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4874 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4875 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4877 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4878 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4880 ins->type = STACK_MP;
4882 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an Array Address/Get/Set call on
 * CMETHOD. Rank 1 uses the inline ldelema_1 path (with bounds check); rank 2
 * uses the inline ldelema_2 path when OP_LMUL is available and intrinsics are
 * enabled; otherwise falls back to calling the marshalled Address wrapper.
 * For setters, the trailing value parameter is excluded from the rank count.
 */
4889 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4893 MonoMethod *addr_method;
4896 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4899 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4901 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4902 /* emit_ldelema_2 depends on OP_LMUL */
4903 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4904 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated array-address wrapper. */
4908 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4909 addr_method = mono_marshal_get_array_address (rank, element_size);
4910 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
4915 static MonoBreakPolicy
4916 always_insert_breakpoint (MonoMethod *method)
4918 return MONO_BREAK_POLICY_ALWAYS;
/* Active breakpoint policy callback; defaults to honoring every breakpoint. */
4921 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4924 * mono_set_break_policy:
4925 * policy_callback: the new callback function
4927 * Allow embedders to decide whether to actually obey breakpoint instructions
4928 * (both break IL instructions and Debugger.Break () method calls), for example
4929 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4930 * untrusted or semi-trusted code.
4932 * @policy_callback will be called every time a break point instruction needs to
4933 * be inserted with the method argument being the method that calls Debugger.Break()
4934 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4935 * if it wants the breakpoint to not be effective in the given method.
4936 * #MONO_BREAK_POLICY_ALWAYS is the default.
4939 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4941 if (policy_callback)
4942 break_policy_func = policy_callback;
/* A NULL callback restores the default always-break policy. */
4944 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should
 * actually be emitted. MONO_BREAK_POLICY_ON_DBG (mdb) is no longer
 * supported and only produces a warning.
 * NOTE(review): the misspelled name ("brekpoint") is kept intentionally —
 * it is a static symbol referenced elsewhere in this file.
 */
4948 should_insert_brekpoint (MonoMethod *method) {
4949 switch (break_policy_func (method)) {
4950 case MONO_BREAK_POLICY_ALWAYS:
4952 case MONO_BREAK_POLICY_NEVER:
4954 case MONO_BREAK_POLICY_ON_DBG:
4955 g_warning ("mdb no longer supported");
4958 g_warning ("Incorrect value returned from break policy callback");
4963 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the Array Get/SetGenericValueImpl icalls: compute the element
 * address (callers already did the bounds check), then copy the value
 * between the element slot and the by-ref argument. For stores of
 * reference-typed elements a write barrier is emitted.
 */
4965 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4967 MonoInst *addr, *store, *load;
4968 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4970 /* the bounds check is already done by the callers */
4971 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Store path: *element = *value_ptr. */
4973 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4974 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4975 if (mini_type_is_reference (cfg, fsig->params [2]))
4976 emit_write_barrier (cfg, addr, load);
/* Load path: *value_ptr = *element. */
4978 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4979 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4986 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4988 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP[2] into element SP[1] of array SP[0] of element
 * type KLASS. Reference-type stores (other than storing a NULL constant)
 * go through the virtual stelemref helper, which performs the array
 * covariance check. Value-type stores compute the element address inline
 * (constant indexes fold the offset); gsharedvt element types use
 * OP_STOREV_MEMBASE. Write barriers are emitted for reference stores.
 */
4992 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4994 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4995 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4996 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4997 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4998 MonoInst *iargs [3];
5001 mono_class_setup_vtable (obj_array);
5002 g_assert (helper->slot);
5004 if (sp [0]->type != STACK_OBJ)
5006 if (sp [2]->type != STACK_OBJ)
5013 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5017 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5020 // FIXME-VT: OP_ICONST optimization
5021 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5022 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5023 ins->opcode = OP_STOREV_MEMBASE;
5024 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset into the store. */
5025 int array_reg = sp [0]->dreg;
5026 int index_reg = sp [1]->dreg;
5027 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
5030 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5031 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5033 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5034 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5035 if (generic_class_is_reference_type (cfg, klass))
5036 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline the Array.UnsafeStore/UnsafeLoad intrinsics: no bounds or type
 * safety checks are emitted. The element class comes from the signature's
 * third parameter for stores, or from the return type for loads.
 */
5043 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5048 eklass = mono_class_from_mono_type (fsig->params [2]);
5050 eklass = mono_class_from_mono_type (fsig->ret);
5053 return emit_array_store (cfg, eklass, args, FALSE);
5055 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5056 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5062 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5066 //Only allow for valuetypes
5067 if (!param_klass->valuetype || !return_klass->valuetype)
5071 if (param_klass->has_references || return_klass->has_references)
5074 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5075 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5076 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5079 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5080 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5083 //And have the same size
5084 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Inline the Array.UnsafeMov intrinsic when the parameter and return
 * types (or, for rank-1 arrays, their element types) are bit-compatible
 * valuetypes; the value is then passed through unchanged.
 */
5090 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5092 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5093 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5095 //Valuetypes that are semantically equivalent
5096 if (is_unsafe_mov_compatible (param_klass, return_klass))
5099 //Arrays of valuetypes that are semantically equivalent
5100 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic: SIMD constructor
 * intrinsics first (when MONO_OPT_SIMD is enabled and the architecture
 * supports them), then native-types intrinsics. Returns NULL when no
 * intrinsic applies and the normal call must be emitted.
 */
5107 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5109 #ifdef MONO_ARCH_SIMD_INTRINSICS
5110 MonoInst *ins = NULL;
5112 if (cfg->opt & MONO_OPT_SIMD) {
5113 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5119 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER instruction of the given KIND (e.g.
 * FullBarrier) to the current basic block.
 */
5123 emit_memory_barrier (MonoCompile *cfg, int kind)
5125 MonoInst *ins = NULL;
5126 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5127 MONO_ADD_INS (cfg->cbb, ins);
5128 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics emitted only for the LLVM backend: Math.Sin/Cos/Sqrt/Abs(R8)
 * become single FP opcodes, and Math.Min/Max on I4/U4/I8/U8 become
 * IMIN/IMAX/LMIN/LMAX (requires MONO_OPT_CMOV). Returns NULL when no
 * intrinsic matches.
 */
5134 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5136 MonoInst *ins = NULL;
5139 /* The LLVM backend supports these intrinsics */
5140 if (cmethod->klass == mono_defaults.math_class) {
5141 if (strcmp (cmethod->name, "Sin") == 0) {
5143 } else if (strcmp (cmethod->name, "Cos") == 0) {
5145 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5147 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary FP intrinsic: one freg in, one freg out. */
5152 MONO_INST_NEW (cfg, ins, opcode);
5153 ins->type = STACK_R8;
5154 ins->dreg = mono_alloc_freg (cfg);
5155 ins->sreg1 = args [0]->dreg;
5156 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map to conditional-move opcodes, so CMOV support is required. */
5160 if (cfg->opt & MONO_OPT_CMOV) {
5161 if (strcmp (cmethod->name, "Min") == 0) {
5162 if (fsig->params [0]->type == MONO_TYPE_I4)
5164 if (fsig->params [0]->type == MONO_TYPE_U4)
5165 opcode = OP_IMIN_UN;
5166 else if (fsig->params [0]->type == MONO_TYPE_I8)
5168 else if (fsig->params [0]->type == MONO_TYPE_U8)
5169 opcode = OP_LMIN_UN;
5170 } else if (strcmp (cmethod->name, "Max") == 0) {
5171 if (fsig->params [0]->type == MONO_TYPE_I4)
5173 if (fsig->params [0]->type == MONO_TYPE_U4)
5174 opcode = OP_IMAX_UN;
5175 else if (fsig->params [0]->type == MONO_TYPE_I8)
5177 else if (fsig->params [0]->type == MONO_TYPE_U8)
5178 opcode = OP_LMAX_UN;
/* Binary integer intrinsic: two iregs in, one ireg out. */
5183 MONO_INST_NEW (cfg, ins, opcode);
5184 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5185 ins->dreg = mono_alloc_ireg (cfg);
5186 ins->sreg1 = args [0]->dreg;
5187 ins->sreg2 = args [1]->dreg;
5188 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic sharing: the internal
 * Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers. Returns NULL when the
 * method is not one of them.
 */
5196 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5198 if (cmethod->klass == mono_defaults.array_class) {
5199 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5200 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5201 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5202 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5203 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5204 return emit_array_unsafe_mov (cfg, fsig, args);
5211 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5213 MonoInst *ins = NULL;
5215 static MonoClass *runtime_helpers_class = NULL;
5216 if (! runtime_helpers_class)
5217 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5218 "System.Runtime.CompilerServices", "RuntimeHelpers");
5220 if (cmethod->klass == mono_defaults.string_class) {
5221 if (strcmp (cmethod->name, "get_Chars") == 0) {
5222 int dreg = alloc_ireg (cfg);
5223 int index_reg = alloc_preg (cfg);
5224 int mult_reg = alloc_preg (cfg);
5225 int add_reg = alloc_preg (cfg);
5227 #if SIZEOF_REGISTER == 8
5228 /* The array reg is 64 bits but the index reg is only 32 */
5229 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5231 index_reg = args [1]->dreg;
5233 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5235 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5236 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5237 add_reg = ins->dreg;
5238 /* Avoid a warning */
5240 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5243 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5244 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5245 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5246 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5248 type_from_op (ins, NULL, NULL);
5250 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5251 int dreg = alloc_ireg (cfg);
5252 /* Decompose later to allow more optimizations */
5253 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5254 ins->type = STACK_I4;
5255 ins->flags |= MONO_INST_FAULT;
5256 cfg->cbb->has_array_access = TRUE;
5257 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5260 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5261 int mult_reg = alloc_preg (cfg);
5262 int add_reg = alloc_preg (cfg);
5264 /* The corlib functions check for oob already. */
5265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5266 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5267 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5268 return cfg->cbb->last_ins;
5271 } else if (cmethod->klass == mono_defaults.object_class) {
5273 if (strcmp (cmethod->name, "GetType") == 0) {
5274 int dreg = alloc_ireg_ref (cfg);
5275 int vt_reg = alloc_preg (cfg);
5276 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5277 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5278 type_from_op (ins, NULL, NULL);
5281 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5282 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5283 int dreg = alloc_ireg (cfg);
5284 int t1 = alloc_ireg (cfg);
5286 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5287 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5288 ins->type = STACK_I4;
5292 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5293 MONO_INST_NEW (cfg, ins, OP_NOP);
5294 MONO_ADD_INS (cfg->cbb, ins);
5298 } else if (cmethod->klass == mono_defaults.array_class) {
5299 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5300 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5302 #ifndef MONO_BIG_ARRAYS
5304 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5307 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5308 int dreg = alloc_ireg (cfg);
5309 int bounds_reg = alloc_ireg_mp (cfg);
5310 MonoBasicBlock *end_bb, *szarray_bb;
5311 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5313 NEW_BBLOCK (cfg, end_bb);
5314 NEW_BBLOCK (cfg, szarray_bb);
5316 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5317 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5318 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5319 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5320 /* Non-szarray case */
5322 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5323 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5325 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5326 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5327 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5328 MONO_START_BB (cfg, szarray_bb);
5331 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5332 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5334 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5335 MONO_START_BB (cfg, end_bb);
5337 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5338 ins->type = STACK_I4;
5344 if (cmethod->name [0] != 'g')
5347 if (strcmp (cmethod->name, "get_Rank") == 0) {
5348 int dreg = alloc_ireg (cfg);
5349 int vtable_reg = alloc_preg (cfg);
5350 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5351 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5352 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5353 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5354 type_from_op (ins, NULL, NULL);
5357 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5358 int dreg = alloc_ireg (cfg);
5360 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5361 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5362 type_from_op (ins, NULL, NULL);
5367 } else if (cmethod->klass == runtime_helpers_class) {
5369 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5370 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
5374 } else if (cmethod->klass == mono_defaults.thread_class) {
5375 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5376 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5377 MONO_ADD_INS (cfg->cbb, ins);
5379 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5380 return emit_memory_barrier (cfg, FullBarrier);
5382 } else if (cmethod->klass == mono_defaults.monitor_class) {
5384 /* FIXME this should be integrated to the check below once we support the trampoline version */
5385 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5386 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5387 MonoMethod *fast_method = NULL;
5389 /* Avoid infinite recursion */
5390 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5393 fast_method = mono_monitor_get_fast_path (cmethod);
5397 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5401 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5402 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5405 if (COMPILE_LLVM (cfg)) {
5407 * Pass the argument normally, the LLVM backend will handle the
5408 * calling convention problems.
5410 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5412 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5413 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5414 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5415 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5418 return (MonoInst*)call;
5419 } else if (strcmp (cmethod->name, "Exit") == 0) {
5422 if (COMPILE_LLVM (cfg)) {
5423 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5425 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5426 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5427 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5428 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5431 return (MonoInst*)call;
5433 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5435 MonoMethod *fast_method = NULL;
5437 /* Avoid infinite recursion */
5438 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5439 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5440 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5443 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5444 strcmp (cmethod->name, "Exit") == 0)
5445 fast_method = mono_monitor_get_fast_path (cmethod);
5449 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5452 } else if (cmethod->klass->image == mono_defaults.corlib &&
5453 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5454 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5457 #if SIZEOF_REGISTER == 8
5458 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5459 /* 64 bit reads are already atomic */
5460 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5461 ins->dreg = mono_alloc_preg (cfg);
5462 ins->inst_basereg = args [0]->dreg;
5463 ins->inst_offset = 0;
5464 MONO_ADD_INS (cfg->cbb, ins);
5468 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
5469 if (strcmp (cmethod->name, "Increment") == 0) {
5470 MonoInst *ins_iconst;
5473 if (fsig->params [0]->type == MONO_TYPE_I4) {
5474 opcode = OP_ATOMIC_ADD_NEW_I4;
5475 cfg->has_atomic_add_new_i4 = TRUE;
5477 #if SIZEOF_REGISTER == 8
5478 else if (fsig->params [0]->type == MONO_TYPE_I8)
5479 opcode = OP_ATOMIC_ADD_NEW_I8;
5482 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5483 ins_iconst->inst_c0 = 1;
5484 ins_iconst->dreg = mono_alloc_ireg (cfg);
5485 MONO_ADD_INS (cfg->cbb, ins_iconst);
5487 MONO_INST_NEW (cfg, ins, opcode);
5488 ins->dreg = mono_alloc_ireg (cfg);
5489 ins->inst_basereg = args [0]->dreg;
5490 ins->inst_offset = 0;
5491 ins->sreg2 = ins_iconst->dreg;
5492 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5493 MONO_ADD_INS (cfg->cbb, ins);
5495 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5496 MonoInst *ins_iconst;
5499 if (fsig->params [0]->type == MONO_TYPE_I4) {
5500 opcode = OP_ATOMIC_ADD_NEW_I4;
5501 cfg->has_atomic_add_new_i4 = TRUE;
5503 #if SIZEOF_REGISTER == 8
5504 else if (fsig->params [0]->type == MONO_TYPE_I8)
5505 opcode = OP_ATOMIC_ADD_NEW_I8;
5508 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5509 ins_iconst->inst_c0 = -1;
5510 ins_iconst->dreg = mono_alloc_ireg (cfg);
5511 MONO_ADD_INS (cfg->cbb, ins_iconst);
5513 MONO_INST_NEW (cfg, ins, opcode);
5514 ins->dreg = mono_alloc_ireg (cfg);
5515 ins->inst_basereg = args [0]->dreg;
5516 ins->inst_offset = 0;
5517 ins->sreg2 = ins_iconst->dreg;
5518 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5519 MONO_ADD_INS (cfg->cbb, ins);
5521 } else if (strcmp (cmethod->name, "Add") == 0) {
5524 if (fsig->params [0]->type == MONO_TYPE_I4) {
5525 opcode = OP_ATOMIC_ADD_NEW_I4;
5526 cfg->has_atomic_add_new_i4 = TRUE;
5528 #if SIZEOF_REGISTER == 8
5529 else if (fsig->params [0]->type == MONO_TYPE_I8)
5530 opcode = OP_ATOMIC_ADD_NEW_I8;
5534 MONO_INST_NEW (cfg, ins, opcode);
5535 ins->dreg = mono_alloc_ireg (cfg);
5536 ins->inst_basereg = args [0]->dreg;
5537 ins->inst_offset = 0;
5538 ins->sreg2 = args [1]->dreg;
5539 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5540 MONO_ADD_INS (cfg->cbb, ins);
5543 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5545 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5546 if (strcmp (cmethod->name, "Exchange") == 0) {
5548 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5550 if (fsig->params [0]->type == MONO_TYPE_I4) {
5551 opcode = OP_ATOMIC_EXCHANGE_I4;
5552 cfg->has_atomic_exchange_i4 = TRUE;
5554 #if SIZEOF_REGISTER == 8
5555 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5556 (fsig->params [0]->type == MONO_TYPE_I))
5557 opcode = OP_ATOMIC_EXCHANGE_I8;
5559 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5560 opcode = OP_ATOMIC_EXCHANGE_I4;
5561 cfg->has_atomic_exchange_i4 = TRUE;
5567 MONO_INST_NEW (cfg, ins, opcode);
5568 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5569 ins->inst_basereg = args [0]->dreg;
5570 ins->inst_offset = 0;
5571 ins->sreg2 = args [1]->dreg;
5572 MONO_ADD_INS (cfg->cbb, ins);
5574 switch (fsig->params [0]->type) {
5576 ins->type = STACK_I4;
5580 ins->type = STACK_I8;
5582 case MONO_TYPE_OBJECT:
5583 ins->type = STACK_OBJ;
5586 g_assert_not_reached ();
5589 if (cfg->gen_write_barriers && is_ref)
5590 emit_write_barrier (cfg, args [0], args [1]);
5592 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5594 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5595 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5597 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5598 if (fsig->params [1]->type == MONO_TYPE_I4)
5600 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5601 size = sizeof (gpointer);
5602 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5605 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5606 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5607 ins->sreg1 = args [0]->dreg;
5608 ins->sreg2 = args [1]->dreg;
5609 ins->sreg3 = args [2]->dreg;
5610 ins->type = STACK_I4;
5611 MONO_ADD_INS (cfg->cbb, ins);
5612 } else if (size == 8) {
5613 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5614 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5615 ins->sreg1 = args [0]->dreg;
5616 ins->sreg2 = args [1]->dreg;
5617 ins->sreg3 = args [2]->dreg;
5618 ins->type = STACK_I8;
5619 MONO_ADD_INS (cfg->cbb, ins);
5621 /* g_assert_not_reached (); */
5623 if (cfg->gen_write_barriers && is_ref)
5624 emit_write_barrier (cfg, args [0], args [1]);
5626 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5628 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5629 ins = emit_memory_barrier (cfg, FullBarrier);
5633 } else if (cmethod->klass->image == mono_defaults.corlib) {
5634 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5635 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5636 if (should_insert_brekpoint (cfg->method)) {
5637 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5639 MONO_INST_NEW (cfg, ins, OP_NOP);
5640 MONO_ADD_INS (cfg->cbb, ins);
5644 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5645 && strcmp (cmethod->klass->name, "Environment") == 0) {
5647 EMIT_NEW_ICONST (cfg, ins, 1);
5649 EMIT_NEW_ICONST (cfg, ins, 0);
5653 } else if (cmethod->klass == mono_defaults.math_class) {
5655 * There is general branches code for Min/Max, but it does not work for
5657 * http://everything2.com/?node_id=1051618
5659 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5660 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5662 MonoJumpInfoToken *ji;
5665 cfg->disable_llvm = TRUE;
5667 if (args [0]->opcode == OP_GOT_ENTRY) {
5668 pi = args [0]->inst_p1;
5669 g_assert (pi->opcode == OP_PATCH_INFO);
5670 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5673 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5674 ji = args [0]->inst_p0;
5677 NULLIFY_INS (args [0]);
5680 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5681 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5682 ins->dreg = mono_alloc_ireg (cfg);
5684 ins->inst_p0 = mono_string_to_utf8 (s);
5685 MONO_ADD_INS (cfg->cbb, ins);
5690 #ifdef MONO_ARCH_SIMD_INTRINSICS
5691 if (cfg->opt & MONO_OPT_SIMD) {
5692 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5698 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5702 if (COMPILE_LLVM (cfg)) {
5703 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5708 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5712 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Give the JIT a chance to redirect a call to METHOD to a replacement call.
 * The only case visible here is String.InternalAllocateStr, which is redirected
 * to a managed GC allocator when allocation profiling is off and MONO_OPT_SHARED
 * is not set.  NOTE(review): several lines of this function are elided in this
 * view (including the fall-through/NULL return path).
 */
5715 inline static MonoInst*
5716 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5717 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5719 if (method->klass == mono_defaults.string_class) {
5720 /* managed string allocation support */
5721 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5722 MonoInst *iargs [2];
5723 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5724 MonoMethod *managed_alloc = NULL;
5726 g_assert (vtable); /* Should not fail since it is System.String */
5727 #ifndef MONO_CROSS_COMPILE
5728 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
5732 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5733 iargs [1] = args [0];
5734 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the call arguments in SP into fresh OP_LOCAL variables which become
 * the callee's argument vars (cfg->args [i]) — used when setting up an inline.
 */
5741 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5743 MonoInst *store, *temp;
5746 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The implicit 'this' is not in sig->params, so derive its type from the stack. */
5747 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5750 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5751 * would be different than the MonoInst's used to represent arguments, and
5752 * the ldelema implementation can't deal with that.
5753 * Solution: When ldelema is used on an inline argument, create a var for
5754 * it, emit ldelema on that var, and emit the saving code below in
5755 * inline_method () if needed.
5757 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5758 cfg->args [i] = temp;
5759 /* This uses cfg->args [i] which is set by the preceding line */
5760 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5761 store->cil_code = sp [0]->cil_code;
5766 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5767 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5769 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts with
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 * Returns TRUE when the callee matches (i.e. inlining may proceed).
 */
5771 check_inline_called_method_name_limit (MonoMethod *called_method)
5774 static const char *limit = NULL;
5776 if (limit == NULL) {
/* Read the env var once; the result is cached in a static. */
5777 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5779 if (limit_string != NULL)
5780 limit = limit_string;
5785 if (limit [0] != '\0') {
5786 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: match only on the first strlen (limit) chars. */
5788 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5789 g_free (called_method_name);
5791 //return (strncmp_result <= 0);
5792 return (strncmp_result == 0);
5799 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining inside callers whose full name starts
 * with the prefix given in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * Mirrors check_inline_called_method_name_limit () for the caller side.
 */
5801 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5804 static const char *limit = NULL;
5806 if (limit == NULL) {
/* Read the env var once; cached in a static. */
5807 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5808 if (limit_string != NULL) {
5809 limit = limit_string;
5815 if (limit [0] != '\0') {
5816 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5818 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5819 g_free (caller_method_name);
5821 //return (strncmp_result <= 0);
5822 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR which initializes the vreg DREG to the zero/null value
 * appropriate for RTYPE (0, 0L, 0.0, NULL, or VZERO for value types).
 */
5830 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Shared storage for the 0.0 constant referenced by OP_R8CONST below. */
5832 static double r8_0 = 0.0;
5836 rtype = mini_replace_type (rtype);
5840 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5841 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5842 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5843 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5844 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5845 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5846 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5847 ins->type = STACK_R8;
5848 ins->inst_p0 = (void*)&r8_0;
5850 MONO_ADD_INS (cfg->cbb, ins);
5851 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5852 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5853 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5854 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
/* Generic type vars known to be value types also get a VZERO. */
5855 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (reference types) is initialized to NULL. */
5857 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit OP_DUMMY_* initializations for DREG: these keep the IR well-formed
 * (every vreg has a def) without generating real initialization code.
 * Falls back to a real init via emit_init_rvar () for any other type.
 */
5862 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5866 rtype = mini_replace_type (rtype);
5870 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5871 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5872 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5873 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5874 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5875 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5876 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5877 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5878 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5879 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5880 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5881 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization instead. */
5883 emit_init_rvar (cfg, dreg, rtype);
5887 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize local variable LOCAL of type TYPE, either for real (INIT) or
 * with dummy defs.  Soft-float targets go through a temp reg + LOCSTORE.
 */
5889 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5891 MonoInst *var = cfg->locals [local];
5892 if (COMPILE_SOFT_FLOAT (cfg)) {
/* Soft float: init into a scratch reg, then store it into the local. */
5894 int reg = alloc_dreg (cfg, var->type);
5895 emit_init_rvar (cfg, reg, type);
5896 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5899 emit_init_rvar (cfg, var->dreg, type);
5901 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the parts of
 * CFG that mono_method_to_ir () mutates, converts the callee's IL between the
 * fresh sbblock/ebblock pair, then either commits (linking/merging the new
 * bblocks into the caller) or rolls everything back.  The accept threshold is
 * costs < 60, or unconditional when INLINE_ALWAYS.
 * NOTE(review): a number of lines (labels, closing braces, return statements)
 * are elided in this view.
 */
5906 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5907 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5909 MonoInst *ins, *rvar = NULL;
5910 MonoMethodHeader *cheader;
5911 MonoBasicBlock *ebblock, *sbblock;
5913 MonoMethod *prev_inlined_method;
5914 MonoInst **prev_locals, **prev_args;
5915 MonoType **prev_arg_types;
5916 guint prev_real_offset;
5917 GHashTable *prev_cbb_hash;
5918 MonoBasicBlock **prev_cil_offset_to_bb;
5919 MonoBasicBlock *prev_cbb;
5920 unsigned char* prev_cil_start;
5921 guint32 prev_cil_offset_to_bb_len;
5922 MonoMethod *prev_current_method;
5923 MonoGenericContext *prev_generic_context;
5924 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5926 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
5928 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5929 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5932 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5933 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5937 if (cfg->verbose_level > 2)
5938 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5940 if (!cmethod->inline_info) {
/* Count each distinct inlineable method only once. */
5941 cfg->stat_inlineable_methods++;
5942 cmethod->inline_info = 1;
5945 /* allocate local variables */
5946 cheader = mono_method_get_header (cmethod);
5948 if (cheader == NULL || mono_loader_get_last_error ()) {
5949 MonoLoaderError *error = mono_loader_get_last_error ();
5952 mono_metadata_free_mh (cheader);
5953 if (inline_always && error)
5954 mono_cfg_set_exception (cfg, error->exception_type);
5956 mono_loader_clear_error ();
5960 /*Must verify before creating locals as it can cause the JIT to assert.*/
5961 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5962 mono_metadata_free_mh (cheader);
5966 /* allocate space to store the return value */
5967 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5968 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
5971 prev_locals = cfg->locals;
5972 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5973 for (i = 0; i < cheader->num_locals; ++i)
5974 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5976 /* allocate start and end blocks */
5977 /* This is needed so if the inline is aborted, we can clean up */
5978 NEW_BBLOCK (cfg, sbblock);
5979 sbblock->real_offset = real_offset;
5981 NEW_BBLOCK (cfg, ebblock);
5982 ebblock->block_num = cfg->num_bblocks++;
5983 ebblock->real_offset = real_offset;
/* Save every piece of per-method CFG state that mono_method_to_ir () clobbers. */
5985 prev_args = cfg->args;
5986 prev_arg_types = cfg->arg_types;
5987 prev_inlined_method = cfg->inlined_method;
5988 cfg->inlined_method = cmethod;
5989 cfg->ret_var_set = FALSE;
5990 cfg->inline_depth ++;
5991 prev_real_offset = cfg->real_offset;
5992 prev_cbb_hash = cfg->cbb_hash;
5993 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5994 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5995 prev_cil_start = cfg->cil_start;
5996 prev_cbb = cfg->cbb;
5997 prev_current_method = cfg->current_method;
5998 prev_generic_context = cfg->generic_context;
5999 prev_ret_var_set = cfg->ret_var_set;
6001 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Convert the callee's IL; COSTS < 0 signals failure. */
6004 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
6006 ret_var_set = cfg->ret_var_set;
/* Restore the caller's CFG state saved above. */
6008 cfg->inlined_method = prev_inlined_method;
6009 cfg->real_offset = prev_real_offset;
6010 cfg->cbb_hash = prev_cbb_hash;
6011 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6012 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6013 cfg->cil_start = prev_cil_start;
6014 cfg->locals = prev_locals;
6015 cfg->args = prev_args;
6016 cfg->arg_types = prev_arg_types;
6017 cfg->current_method = prev_current_method;
6018 cfg->generic_context = prev_generic_context;
6019 cfg->ret_var_set = prev_ret_var_set;
6020 cfg->inline_depth --;
6022 if ((costs >= 0 && costs < 60) || inline_always) {
6023 if (cfg->verbose_level > 2)
6024 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6026 cfg->stat_inlined_methods++;
6028 /* always add some code to avoid block split failures */
6029 MONO_INST_NEW (cfg, ins, OP_NOP);
6030 MONO_ADD_INS (prev_cbb, ins);
6032 prev_cbb->next_bb = sbblock;
6033 link_bblock (cfg, prev_cbb, sbblock);
6036 * Get rid of the begin and end bblocks if possible to aid local
6039 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6041 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6042 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6044 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6045 MonoBasicBlock *prev = ebblock->in_bb [0];
6046 mono_merge_basic_blocks (cfg, prev, ebblock);
6048 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6049 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6050 cfg->cbb = prev_cbb;
6054 * It's possible that the rvar is set in some prev bblock, but not in others.
6060 for (i = 0; i < ebblock->in_count; ++i) {
6061 bb = ebblock->in_bb [i];
6063 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6066 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6076 * If the inlined method contains only a throw, then the ret var is not
6077 * set, so set it to a dummy value.
6080 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6082 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6085 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: discard the partially-built inline and reset exception state. */
6088 if (cfg->verbose_level > 2)
6089 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6090 cfg->exception_type = MONO_EXCEPTION_NONE;
6091 mono_loader_clear_error ();
6093 /* This gets rid of the newly added bblocks */
6094 cfg->cbb = prev_cbb;
6096 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6101 * Some of these comments may well be out-of-date.
6102 * Design decisions: we do a single pass over the IL code (and we do bblock
6103 * splitting/merging in the few cases when it's required: a back jump to an IL
6104 * address that was not already seen as bblock starting point).
6105 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6106 * Complex operations are decomposed in simpler ones right away. We need to let the
6107 * arch-specific code peek and poke inside this process somehow (except when the
6108 * optimizations can take advantage of the full semantic info of coarse opcodes).
6109 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6110 * MonoInst->opcode initially is the IL opcode or some simplification of that
6111 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6112 * opcode with value bigger than OP_LAST.
6113 * At this point the IR can be handed over to an interpreter, a dumb code generator
6114 * or to the optimizing code generator that will translate it to SSA form.
6116 * Profiling directed optimizations.
6117 * We may compile by default with few or no optimizations and instrument the code
6118 * or the user may indicate what methods to optimize the most either in a config file
6119 * or through repeated runs where the compiler applies offline the optimizations to
6120 * each method and then decides if it was worth it.
/* Verification helpers used by mono_method_to_ir (); each branches to the
 * UNVERIFIED (or LOAD_ERROR) label on failure. */
6123 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6124 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6125 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6126 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6127 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6128 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6129 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6130 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
6132 /* offset from br.s -> br like opcodes */
6133 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when IP either falls inside bblock BB or has no bblock
 * recorded for it in the cil_offset_to_bb map.
 */
6136 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6138 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6140 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL between START and END and create basic blocks (via
 * GET_BBLOCK) at every branch target and fall-through point.  Also marks the
 * bblock containing a CEE_THROW as out-of-line.  NOTE(review): the operand
 * sizes added to ip for each MonoInline* case are partly elided in this view.
 */
6144 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6146 unsigned char *ip = start;
6147 unsigned char *target;
6150 MonoBasicBlock *bblock;
6151 const MonoOpcode *opcode;
6154 cli_addr = ip - start;
6155 i = mono_opcode_value ((const guint8 **)&ip, end);
6158 opcode = &mono_opcodes [i];
/* Advance IP according to the opcode's operand encoding. */
6159 switch (opcode->argument) {
6160 case MonoInlineNone:
6163 case MonoInlineString:
6164 case MonoInlineType:
6165 case MonoInlineField:
6166 case MonoInlineMethod:
6169 case MonoShortInlineR:
6176 case MonoShortInlineVar:
6177 case MonoShortInlineI:
6180 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement; target and fall-through both start bblocks. */
6181 target = start + cli_addr + 2 + (signed char)ip [1];
6182 GET_BBLOCK (cfg, bblock, target);
6185 GET_BBLOCK (cfg, bblock, ip);
6187 case MonoInlineBrTarget:
/* 4-byte signed branch displacement. */
6188 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6189 GET_BBLOCK (cfg, bblock, target);
6192 GET_BBLOCK (cfg, bblock, ip);
6194 case MonoInlineSwitch: {
6195 guint32 n = read32 (ip + 1);
/* Fall-through address is after the count and the n 32-bit targets. */
6198 cli_addr += 5 + 4 * n;
6199 target = start + cli_addr;
6200 GET_BBLOCK (cfg, bblock, target);
6202 for (j = 0; j < n; ++j) {
6203 target = start + cli_addr + (gint32)read32 (ip);
6204 GET_BBLOCK (cfg, bblock, target);
6214 g_assert_not_reached ();
6217 if (i == CEE_THROW) {
6218 unsigned char *bb_start = ip - 1;
6220 /* Find the start of the bblock containing the throw */
6222 while ((bb_start >= start) && !bblock) {
6223 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Mark throwing bblocks cold so they can be laid out out of line. */
6227 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN relative to M into a MonoMethod, allowing open constructed
 * types.  Wrapper methods store their tokens in wrapper data instead of
 * metadata; those are inflated with CONTEXT when needed.
 */
6237 static inline MonoMethod *
6238 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6242 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6243 method = mono_method_get_wrapper_data (m, token);
6245 method = mono_class_inflate_generic_method (method, context);
6247 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, methods on open constructed types are rejected (elided branch
 * presumably returns NULL — not visible in this view).
 */
6253 static inline MonoMethod *
6254 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6256 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6258 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN relative to METHOD into an initialized MonoClass,
 * going through wrapper data + inflation for wrapper methods.
 */
6264 static inline MonoClass*
6265 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6269 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6270 klass = mono_method_get_wrapper_data (method, token);
6272 klass = mono_class_inflate_generic_class (klass, context);
6274 klass = mono_class_get_full (method->klass->image, token, context);
6277 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN relative to METHOD into a MonoMethodSignature, inflating
 * wrapper-stored signatures with CONTEXT when needed.
 */
6281 static inline MonoMethodSignature*
6282 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6284 MonoMethodSignature *fsig;
6286 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6289 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6291 fsig = mono_inflate_generic_signature (fsig, context, &error);
6293 g_assert (mono_error_ok (&error));
6296 fsig = mono_metadata_parse_signature (method->klass->image, token);
6302 * Returns TRUE if the JIT should abort inlining because "callee"
6303 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands when CALLER calls CALLEE.  For an ECMA link
 * demand, emits a call which throws a SecurityException at the call site;
 * other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg.
 * Per the comment above (elided): returns TRUE when inlining must be aborted
 * because CALLEE is influenced by security attributes.
 */
6306 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6310 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6314 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6315 if (result == MONO_JIT_SECURITY_OK)
6318 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6319 /* Generate code to throw a SecurityException before the actual call/link */
6320 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6323 NEW_ICONST (cfg, args [0], 4);
6324 NEW_METHODCONST (cfg, args [1], caller);
6325 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6326 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6327 /* don't hide previous results */
6328 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6329 cfg->exception_data = result;
/*
 * throw_exception:
 *   Lazily resolve and cache SecurityManager.ThrowException (1 arg);
 * the cached MonoMethod* is returned (return elided in this view).
 */
6337 throw_exception (void)
6339 static MonoMethod *method = NULL;
6342 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6343 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * emission point, so EX is raised at runtime when this code executes.
 */
6350 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6352 MonoMethod *thrower = throw_exception ();
6355 EMIT_NEW_PCONST (cfg, args [0], ex);
6356 mono_emit_method_call (cfg, thrower, args, NULL);
6360 * Return the original method if a wrapper is specified. We can only access
6361 * the custom attributes from the original method.
/*
 * get_original_method:
 *   Map a wrapper back to the method it wraps so its custom attributes can
 * be inspected; non-wrappers and native-to-managed wrappers are returned
 * as-is (returns elided in this view).
 */
6364 get_original_method (MonoMethod *method)
6366 if (method->wrapper_type == MONO_WRAPPER_NONE)
6369 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6370 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6373 /* in other cases we need to find the original method */
6374 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the returned exception at this point at runtime.
 */
6378 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6379 MonoBasicBlock *bblock, unsigned char *ip)
6381 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6382 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6384 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the returned exception at this point at runtime.
 */
6388 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6389 MonoBasicBlock *bblock, unsigned char *ip)
6391 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6392 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6394 emit_throw_exception (cfg, ex);
6398 * Check that the IL instructions at ip are the array initialization
6399 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Recognize the dup/ldtoken/call RuntimeHelpers::InitializeArray IL
 * sequence at IP and, when it matches, return a pointer to the static field
 * data (or, for AOT, the RVA) plus its size/field token through the out
 * params, so the array init can be done with a memcpy instead of the icall.
 * NOTE(review): many lines (size computation per element type, failure
 * returns) are elided in this view.
 */
6402 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6405 * newarr[System.Int32]
6407 * ldtoken field valuetype ...
6408 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6410 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6411 guint32 token = read32 (ip + 7);
6412 guint32 field_token = read32 (ip + 2);
6413 guint32 field_index = field_token & 0xffffff;
6415 const char *data_ptr;
6417 MonoMethod *cmethod;
6418 MonoClass *dummy_class;
6419 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6425 *out_field_token = field_token;
6427 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real RuntimeHelpers.InitializeArray from corlib qualifies. */
6430 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6432 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6433 case MONO_TYPE_BOOLEAN:
6437 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6438 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6439 case MONO_TYPE_CHAR:
/* Bail out when the IL-declared length exceeds the field's data size. */
6456 if (size > mono_type_size (field->type, &dummy_align))
6459 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6460 if (!method->klass->image->dynamic) {
6461 field_index = read32 (ip + 2) & 0xffffff;
6462 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6463 data_ptr = mono_image_rva_map (method->klass->image, rva);
6464 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6465 /* for aot code we do the lookup on load */
6466 if (aot && data_ptr)
6467 return GUINT_TO_POINTER (rva);
6469 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: read the data directly from the field. */
6471 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG with a message naming METHOD
 * and disassembling the offending instruction at IP.
 */
6479 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6481 char *method_fname = mono_method_full_name (method, TRUE);
6483 MonoMethodHeader *header = mono_method_get_header (method);
6485 if (header->code_size == 0)
6486 method_code = g_strdup ("method body is empty.");
6488 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6489 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6490 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6491 g_free (method_fname);
6492 g_free (method_code);
/* The header is freed later along with the rest of the cfg mempool data. */
6493 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built exception object on CFG; the pointer is GC-registered
 * since it lives past the current stack frame.
 */
6497 set_exception_object (MonoCompile *cfg, MonoException *exception)
6499 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6500 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6501 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store of *SP into local N.  When the value on top of the stack is
 * a constant that would only need a reg-reg move, retarget its dreg to the
 * local's register instead of emitting a separate store.
 */
6505 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6508 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6509 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6510 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6511 /* Optimize reg-reg moves away */
6513 * Can't optimize other opcodes, since sp[0] might point to
6514 * the last ins of a decomposed opcode.
6516 sp [0]->dreg = (cfg)->locals [n]->dreg;
6518 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6523 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for "ldloca <n>; initobj <type>": replace the pair with a
 * direct initialization of the local, avoiding the address-taken local
 * that would inhibit later optimizations.  Returns the advanced ip on
 * success (return elided in this view).
 */
6526 static inline unsigned char *
6527 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6537 local = read16 (ip + 2);
6541 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6542 /* From the INITOBJ case */
6543 token = read32 (ip + 2);
6544 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6545 CHECK_TYPELOAD (klass);
6546 type = mini_replace_type (&klass->byval_arg);
6547 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *   Walk up the parent chain to determine whether CLASS derives from
 * System.Exception.
 */
6555 is_exception_class (MonoClass *class)
6558 if (class == mono_defaults.exception_class)
6560 class = class->parent;
6566 * is_jit_optimizer_disabled:
6568 * Determine whenever M's assembly has a DebuggableAttribute with the
6569 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *   Check (and cache per-assembly) whether M's assembly carries a
 * [Debuggable (..., isJITOptimizerDisabled: ...)] attribute.  The cached
 * flag is published with a memory barrier before the inited flag so racing
 * readers never see a stale value.
 */
6572 is_jit_optimizer_disabled (MonoMethod *m)
6574 MonoAssembly *ass = m->klass->image->assembly;
6575 MonoCustomAttrInfo* attrs;
6576 static MonoClass *klass;
6578 gboolean val = FALSE;
6581 if (ass->jit_optimizer_disabled_inited)
6582 return ass->jit_optimizer_disabled;
6585 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute type: optimizer is never disabled. */
6588 ass->jit_optimizer_disabled = FALSE;
6589 mono_memory_barrier ();
6590 ass->jit_optimizer_disabled_inited = TRUE;
6594 attrs = mono_custom_attrs_from_assembly (ass);
6596 for (i = 0; i < attrs->num_attrs; ++i) {
6597 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6600 MonoMethodSignature *sig;
6602 if (!attr->ctor || attr->ctor->klass != klass)
6604 /* Decode the attribute. See reflection.c */
6605 len = attr->data_size;
6606 p = (const char*)attr->data;
/* 0x0001 is the custom attribute blob prolog. */
6607 g_assert (read16 (p) == 0x0001);
6610 // FIXME: Support named parameters
6611 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here. */
6612 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6614 /* Two boolean arguments */
6618 mono_custom_attrs_free (attrs);
6621 ass->jit_optimizer_disabled = val;
6622 mono_memory_barrier ();
6623 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call: the architecture must support the signature pair,
 * and nothing may reference the current frame after it is gone (byref/ptr
 * args, valuetype 'this', pinvoke, LMF-saving callers, most wrappers).
 */
6629 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6631 gboolean supported_tail_call;
6634 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6635 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* Fallback check: identical signatures and no struct return. */
6637 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6640 for (i = 0; i < fsig->param_count; ++i) {
6641 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6642 /* These can point to the current method's stack */
6643 supported_tail_call = FALSE;
6645 if (fsig->hasthis && cmethod->klass->valuetype)
6646 /* this might point to the current method's stack */
6647 supported_tail_call = FALSE;
6648 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6649 supported_tail_call = FALSE;
6650 if (cfg->method->save_lmf)
6651 supported_tail_call = FALSE;
6652 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6653 supported_tail_call = FALSE;
6654 if (call_opcode != CEE_CALL)
6655 supported_tail_call = FALSE;
6657 /* Debugging support */
6659 if (supported_tail_call) {
/* mono_debug_count () lets tail calls be bisected when debugging. */
6660 if (!mono_debug_count ())
6661 supported_tail_call = FALSE;
6665 return supported_tail_call;
6668 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6669 * it to the thread local value based on the tls_offset field. Every other kind of access to
6670 * the field causes an assert.
/*
 * is_magic_tls_access:
 *   Return TRUE for the corlib ThreadLocal`1.tlsdata field, whose ldflda
 * accesses the JIT intercepts and redirects to thread-local storage.
 */
6673 is_magic_tls_access (MonoClassField *field)
6675 if (strcmp (field->name, "tlsdata"))
6677 if (strcmp (field->parent->name, "ThreadLocal`1"))
6679 return field->parent->image == mono_defaults.corlib;
6682 /* emits the code needed to access a managed tls var (like ThreadStatic)
6683 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6684 * pointer for the current thread.
6685 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *
 *   Emit IR that computes, from THREAD_INS (the MonoInternalThread pointer)
 * and OFFSET_REG (the encoded tls offset), the address of a managed tls slot:
 * the top 8 bits of the offset select an entry in thread->static_data and the
 * low 24 bits are the byte offset inside that chunk.  Returns the address
 * MonoInst (built into 'addr' by the final EMIT_NEW_BIALU).
 */
6688 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6691 int static_data_reg, array_reg, dreg;
6692 int offset2_reg, idx_reg;
6693 // inlined access to the tls data
6694 // idx = (offset >> 24) - 1;
6695 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* static_data_reg <- thread->static_data */
6696 static_data_reg = alloc_ireg (cfg);
6697 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx_reg <- ((offset >> 24) - 1), then scaled to a pointer-sized array index. */
6698 idx_reg = alloc_ireg (cfg);
6699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6700 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6701 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6702 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array_reg <- static_data [idx] (the chunk base pointer). */
6703 array_reg = alloc_ireg (cfg);
6704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2_reg <- offset & 0xffffff (byte offset within the chunk). */
6705 offset2_reg = alloc_ireg (cfg);
6706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
/* addr <- chunk base + intra-chunk offset. */
6707 dreg = alloc_ireg (cfg);
6708 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6713 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6714 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Emit (or reuse) the address of the tls var behind THREAD_LOCAL's
 * "tlsdata" field: load the tls_offset field, obtain the current
 * MonoInternalThread (via intrinsic when available, otherwise a call to
 * CurrentInternalThread_internal), compute the slot address with
 * emit_managed_static_data_access (), and cache the result in a local so
 * repeated accesses in the same method reuse *CACHED_TLS_ADDR.
 *   NOTE(review): the branch structure around the intrinsic-vs-call path is
 * missing from this listing; the lines below are the surviving excerpts.
 */
6717 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6719 MonoInst *load, *addr, *temp, *store, *thread_ins;
6720 MonoClassField *offset_field;
/* Fast path: the address was already computed for this method — reload it. */
6722 if (*cached_tls_addr) {
6723 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6726 thread_ins = mono_get_thread_intrinsic (cfg);
/* load <- thread_local->tls_offset (the encoded slot offset). */
6727 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6729 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6731 MONO_ADD_INS (cfg->cbb, thread_ins);
/* Presumably the fallback when no thread intrinsic exists: call the icall — TODO confirm branch shape. */
6733 MonoMethod *thread_method;
6734 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6735 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the slot address and give it managed-pointer typing info. */
6737 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6738 addr->klass = mono_class_from_mono_type (tls_field->type);
6739 addr->type = STACK_MP;
/* Cache the address in a compiler temp for later uses in this method. */
6740 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6741 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6743 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6748 * mono_method_to_ir:
6750 * Translate the .net IL into linear IR.
6753 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6754 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6755 guint inline_offset, gboolean is_virtual_call)
6758 MonoInst *ins, **sp, **stack_start;
6759 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6760 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6761 MonoMethod *cmethod, *method_definition;
6762 MonoInst **arg_array;
6763 MonoMethodHeader *header;
6765 guint32 token, ins_flag;
6767 MonoClass *constrained_call = NULL;
6768 unsigned char *ip, *end, *target, *err_pos;
6769 MonoMethodSignature *sig;
6770 MonoGenericContext *generic_context = NULL;
6771 MonoGenericContainer *generic_container = NULL;
6772 MonoType **param_types;
6773 int i, n, start_new_bblock, dreg;
6774 int num_calls = 0, inline_costs = 0;
6775 int breakpoint_id = 0;
6777 MonoBoolean security, pinvoke;
6778 MonoSecurityManager* secman = NULL;
6779 MonoDeclSecurityActions actions;
6780 GSList *class_inits = NULL;
6781 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6783 gboolean init_locals, seq_points, skip_dead_blocks;
6784 gboolean disable_inline, sym_seq_points = FALSE;
6785 MonoInst *cached_tls_addr = NULL;
6786 MonoDebugMethodInfo *minfo;
6787 MonoBitSet *seq_point_locs = NULL;
6788 MonoBitSet *seq_point_set_locs = NULL;
6790 disable_inline = is_jit_optimizer_disabled (method);
6792 /* serialization and xdomain stuff may need access to private fields and methods */
6793 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6794 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6795 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6796 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6797 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6798 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6800 dont_verify |= mono_security_smcs_hack_enabled ();
6802 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6803 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6804 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6805 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6806 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6808 image = method->klass->image;
6809 header = mono_method_get_header (method);
6811 MonoLoaderError *error;
6813 if ((error = mono_loader_get_last_error ())) {
6814 mono_cfg_set_exception (cfg, error->exception_type);
6816 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6817 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6819 goto exception_exit;
6821 generic_container = mono_method_get_generic_container (method);
6822 sig = mono_method_signature (method);
6823 num_args = sig->hasthis + sig->param_count;
6824 ip = (unsigned char*)header->code;
6825 cfg->cil_start = ip;
6826 end = ip + header->code_size;
6827 cfg->stat_cil_code_size += header->code_size;
6829 seq_points = cfg->gen_seq_points && cfg->method == method;
6830 #ifdef PLATFORM_ANDROID
6831 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6834 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6835 /* We could hit a seq point before attaching to the JIT (#8338) */
6839 if (cfg->gen_seq_points && cfg->method == method) {
6840 minfo = mono_debug_lookup_method (method);
6842 int i, n_il_offsets;
6846 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6847 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6848 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6849 sym_seq_points = TRUE;
6850 for (i = 0; i < n_il_offsets; ++i) {
6851 if (il_offsets [i] < header->code_size)
6852 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6854 g_free (il_offsets);
6855 g_free (line_numbers);
6860 * Methods without init_locals set could cause asserts in various passes
6861 * (#497220). To work around this, we emit dummy initialization opcodes
6862 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6863 * on some platforms.
6865 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6866 init_locals = header->init_locals;
6870 method_definition = method;
6871 while (method_definition->is_inflated) {
6872 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6873 method_definition = imethod->declaring;
6876 /* SkipVerification is not allowed if core-clr is enabled */
6877 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6879 dont_verify_stloc = TRUE;
6882 if (sig->is_inflated)
6883 generic_context = mono_method_get_context (method);
6884 else if (generic_container)
6885 generic_context = &generic_container->context;
6886 cfg->generic_context = generic_context;
6888 if (!cfg->generic_sharing_context)
6889 g_assert (!sig->has_type_parameters);
6891 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6892 g_assert (method->is_inflated);
6893 g_assert (mono_method_get_context (method)->method_inst);
6895 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6896 g_assert (sig->generic_param_count);
6898 if (cfg->method == method) {
6899 cfg->real_offset = 0;
6901 cfg->real_offset = inline_offset;
6904 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6905 cfg->cil_offset_to_bb_len = header->code_size;
6907 cfg->current_method = method;
6909 if (cfg->verbose_level > 2)
6910 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6912 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6914 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6915 for (n = 0; n < sig->param_count; ++n)
6916 param_types [n + sig->hasthis] = sig->params [n];
6917 cfg->arg_types = param_types;
6919 dont_inline = g_list_prepend (dont_inline, method);
6920 if (cfg->method == method) {
6922 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6923 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6926 NEW_BBLOCK (cfg, start_bblock);
6927 cfg->bb_entry = start_bblock;
6928 start_bblock->cil_code = NULL;
6929 start_bblock->cil_length = 0;
6930 #if defined(__native_client_codegen__)
6931 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6932 ins->dreg = alloc_dreg (cfg, STACK_I4);
6933 MONO_ADD_INS (start_bblock, ins);
6937 NEW_BBLOCK (cfg, end_bblock);
6938 cfg->bb_exit = end_bblock;
6939 end_bblock->cil_code = NULL;
6940 end_bblock->cil_length = 0;
6941 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6942 g_assert (cfg->num_bblocks == 2);
6944 arg_array = cfg->args;
6946 if (header->num_clauses) {
6947 cfg->spvars = g_hash_table_new (NULL, NULL);
6948 cfg->exvars = g_hash_table_new (NULL, NULL);
6950 /* handle exception clauses */
6951 for (i = 0; i < header->num_clauses; ++i) {
6952 MonoBasicBlock *try_bb;
6953 MonoExceptionClause *clause = &header->clauses [i];
6954 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6955 try_bb->real_offset = clause->try_offset;
6956 try_bb->try_start = TRUE;
6957 try_bb->region = ((i + 1) << 8) | clause->flags;
6958 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6959 tblock->real_offset = clause->handler_offset;
6960 tblock->flags |= BB_EXCEPTION_HANDLER;
6963 * Linking the try block with the EH block hinders inlining as we won't be able to
6964 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6966 if (COMPILE_LLVM (cfg))
6967 link_bblock (cfg, try_bb, tblock);
6969 if (*(ip + clause->handler_offset) == CEE_POP)
6970 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6972 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6973 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6974 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6975 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6976 MONO_ADD_INS (tblock, ins);
6978 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6979 /* finally clauses already have a seq point */
6980 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6981 MONO_ADD_INS (tblock, ins);
6984 /* todo: is a fault block unsafe to optimize? */
6985 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6986 tblock->flags |= BB_EXCEPTION_UNSAFE;
6990 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6992 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6994 /* catch and filter blocks get the exception object on the stack */
6995 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6996 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6997 MonoInst *dummy_use;
6999 /* mostly like handle_stack_args (), but just sets the input args */
7000 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7001 tblock->in_scount = 1;
7002 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7003 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7006 * Add a dummy use for the exvar so its liveness info will be
7010 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7012 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7013 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7014 tblock->flags |= BB_EXCEPTION_HANDLER;
7015 tblock->real_offset = clause->data.filter_offset;
7016 tblock->in_scount = 1;
7017 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7018 /* The filter block shares the exvar with the handler block */
7019 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7020 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7021 MONO_ADD_INS (tblock, ins);
7025 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7026 clause->data.catch_class &&
7027 cfg->generic_sharing_context &&
7028 mono_class_check_context_used (clause->data.catch_class)) {
7030 * In shared generic code with catch
7031 * clauses containing type variables
7032 * the exception handling code has to
7033 * be able to get to the rgctx.
7034 * Therefore we have to make sure that
7035 * the vtable/mrgctx argument (for
7036 * static or generic methods) or the
7037 * "this" argument (for non-static
7038 * methods) are live.
7040 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7041 mini_method_get_context (method)->method_inst ||
7042 method->klass->valuetype) {
7043 mono_get_vtable_var (cfg);
7045 MonoInst *dummy_use;
7047 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7052 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7053 cfg->cbb = start_bblock;
7054 cfg->args = arg_array;
7055 mono_save_args (cfg, sig, inline_args);
7058 /* FIRST CODE BLOCK */
7059 NEW_BBLOCK (cfg, bblock);
7060 bblock->cil_code = ip;
7064 ADD_BBLOCK (cfg, bblock);
7066 if (cfg->method == method) {
7067 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7068 if (breakpoint_id) {
7069 MONO_INST_NEW (cfg, ins, OP_BREAK);
7070 MONO_ADD_INS (bblock, ins);
7074 if (mono_security_cas_enabled ())
7075 secman = mono_security_manager_get_methods ();
7077 security = (secman && mono_security_method_has_declsec (method));
7078 /* at this point having security doesn't mean we have any code to generate */
7079 if (security && (cfg->method == method)) {
7080 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7081 * And we do not want to enter the next section (with allocation) if we
7082 * have nothing to generate */
7083 security = mono_declsec_get_demands (method, &actions);
7086 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7087 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7089 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7090 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7091 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7093 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7094 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7098 mono_custom_attrs_free (custom);
7101 custom = mono_custom_attrs_from_class (wrapped->klass);
7102 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7106 mono_custom_attrs_free (custom);
7109 /* not a P/Invoke after all */
7114 /* we use a separate basic block for the initialization code */
7115 NEW_BBLOCK (cfg, init_localsbb);
7116 cfg->bb_init = init_localsbb;
7117 init_localsbb->real_offset = cfg->real_offset;
7118 start_bblock->next_bb = init_localsbb;
7119 init_localsbb->next_bb = bblock;
7120 link_bblock (cfg, start_bblock, init_localsbb);
7121 link_bblock (cfg, init_localsbb, bblock);
7123 cfg->cbb = init_localsbb;
7125 if (cfg->gsharedvt && cfg->method == method) {
7126 MonoGSharedVtMethodInfo *info;
7127 MonoInst *var, *locals_var;
7130 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7131 info->method = cfg->method;
7132 info->count_entries = 16;
7133 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7134 cfg->gsharedvt_info = info;
7136 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7137 /* prevent it from being register allocated */
7138 //var->flags |= MONO_INST_VOLATILE;
7139 cfg->gsharedvt_info_var = var;
7141 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7142 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7144 /* Allocate locals */
7145 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7146 /* prevent it from being register allocated */
7147 //locals_var->flags |= MONO_INST_VOLATILE;
7148 cfg->gsharedvt_locals_var = locals_var;
7150 dreg = alloc_ireg (cfg);
7151 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7153 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7154 ins->dreg = locals_var->dreg;
7156 MONO_ADD_INS (cfg->cbb, ins);
7157 cfg->gsharedvt_locals_var_ins = ins;
7159 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7162 ins->flags |= MONO_INST_INIT;
7166 /* at this point we know, if security is TRUE, that some code needs to be generated */
7167 if (security && (cfg->method == method)) {
7170 cfg->stat_cas_demand_generation++;
7172 if (actions.demand.blob) {
7173 /* Add code for SecurityAction.Demand */
7174 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7175 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7176 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7177 mono_emit_method_call (cfg, secman->demand, args, NULL);
7179 if (actions.noncasdemand.blob) {
7180 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7181 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7182 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7183 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7184 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7185 mono_emit_method_call (cfg, secman->demand, args, NULL);
7187 if (actions.demandchoice.blob) {
7188 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7189 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7190 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7191 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7192 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7196 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7198 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7201 if (mono_security_core_clr_enabled ()) {
7202 /* check if this is native code, e.g. an icall or a p/invoke */
7203 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7204 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7206 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7207 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7209 /* if this ia a native call then it can only be JITted from platform code */
7210 if ((icall || pinvk) && method->klass && method->klass->image) {
7211 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7212 MonoException *ex = icall ? mono_get_exception_security () :
7213 mono_get_exception_method_access ();
7214 emit_throw_exception (cfg, ex);
7221 CHECK_CFG_EXCEPTION;
7223 if (header->code_size == 0)
7226 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7231 if (cfg->method == method)
7232 mono_debug_init_method (cfg, bblock, breakpoint_id);
7234 for (n = 0; n < header->num_locals; ++n) {
7235 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7240 /* We force the vtable variable here for all shared methods
7241 for the possibility that they might show up in a stack
7242 trace where their exact instantiation is needed. */
7243 if (cfg->generic_sharing_context && method == cfg->method) {
7244 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7245 mini_method_get_context (method)->method_inst ||
7246 method->klass->valuetype) {
7247 mono_get_vtable_var (cfg);
7249 /* FIXME: Is there a better way to do this?
7250 We need the variable live for the duration
7251 of the whole method. */
7252 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7256 /* add a check for this != NULL to inlined methods */
7257 if (is_virtual_call) {
7260 NEW_ARGLOAD (cfg, arg_ins, 0);
7261 MONO_ADD_INS (cfg->cbb, arg_ins);
7262 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7265 skip_dead_blocks = !dont_verify;
7266 if (skip_dead_blocks) {
7267 original_bb = bb = mono_basic_block_split (method, &error);
7268 if (!mono_error_ok (&error)) {
7269 mono_error_cleanup (&error);
7275 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7276 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7279 start_new_bblock = 0;
7282 if (cfg->method == method)
7283 cfg->real_offset = ip - header->code;
7285 cfg->real_offset = inline_offset;
7290 if (start_new_bblock) {
7291 bblock->cil_length = ip - bblock->cil_code;
7292 if (start_new_bblock == 2) {
7293 g_assert (ip == tblock->cil_code);
7295 GET_BBLOCK (cfg, tblock, ip);
7297 bblock->next_bb = tblock;
7300 start_new_bblock = 0;
7301 for (i = 0; i < bblock->in_scount; ++i) {
7302 if (cfg->verbose_level > 3)
7303 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7304 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7308 g_slist_free (class_inits);
7311 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7312 link_bblock (cfg, bblock, tblock);
7313 if (sp != stack_start) {
7314 handle_stack_args (cfg, stack_start, sp - stack_start);
7316 CHECK_UNVERIFIABLE (cfg);
7318 bblock->next_bb = tblock;
7321 for (i = 0; i < bblock->in_scount; ++i) {
7322 if (cfg->verbose_level > 3)
7323 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7324 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7327 g_slist_free (class_inits);
7332 if (skip_dead_blocks) {
7333 int ip_offset = ip - header->code;
7335 if (ip_offset == bb->end)
7339 int op_size = mono_opcode_size (ip, end);
7340 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7342 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7344 if (ip_offset + op_size == bb->end) {
7345 MONO_INST_NEW (cfg, ins, OP_NOP);
7346 MONO_ADD_INS (bblock, ins);
7347 start_new_bblock = 1;
7355 * Sequence points are points where the debugger can place a breakpoint.
7356 * Currently, we generate these automatically at points where the IL
7359 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7361 * Make methods interruptable at the beginning, and at the targets of
7362 * backward branches.
7363 * Also, do this at the start of every bblock in methods with clauses too,
7364 * to be able to handle instructions with inprecise control flow like
7366 * Backward branches are handled at the end of method-to-ir ().
7368 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7370 /* Avoid sequence points on empty IL like .volatile */
7371 // FIXME: Enable this
7372 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7373 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7374 if (sp != stack_start)
7375 ins->flags |= MONO_INST_NONEMPTY_STACK;
7376 MONO_ADD_INS (cfg->cbb, ins);
7379 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7382 bblock->real_offset = cfg->real_offset;
7384 if ((cfg->method == method) && cfg->coverage_info) {
7385 guint32 cil_offset = ip - header->code;
7386 cfg->coverage_info->data [cil_offset].cil_code = ip;
7388 /* TODO: Use an increment here */
7389 #if defined(TARGET_X86)
7390 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7391 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7393 MONO_ADD_INS (cfg->cbb, ins);
7395 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7396 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7400 if (cfg->verbose_level > 3)
7401 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7405 if (seq_points && !sym_seq_points && sp != stack_start) {
7407 * The C# compiler uses these nops to notify the JIT that it should
7408 * insert seq points.
7410 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7411 MONO_ADD_INS (cfg->cbb, ins);
7413 if (cfg->keep_cil_nops)
7414 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7416 MONO_INST_NEW (cfg, ins, OP_NOP);
7418 MONO_ADD_INS (bblock, ins);
7421 if (should_insert_brekpoint (cfg->method)) {
7422 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7424 MONO_INST_NEW (cfg, ins, OP_NOP);
7427 MONO_ADD_INS (bblock, ins);
7433 CHECK_STACK_OVF (1);
7434 n = (*ip)-CEE_LDARG_0;
7436 EMIT_NEW_ARGLOAD (cfg, ins, n);
7444 CHECK_STACK_OVF (1);
7445 n = (*ip)-CEE_LDLOC_0;
7447 EMIT_NEW_LOCLOAD (cfg, ins, n);
7456 n = (*ip)-CEE_STLOC_0;
7459 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7461 emit_stloc_ir (cfg, sp, header, n);
7468 CHECK_STACK_OVF (1);
7471 EMIT_NEW_ARGLOAD (cfg, ins, n);
7477 CHECK_STACK_OVF (1);
7480 NEW_ARGLOADA (cfg, ins, n);
7481 MONO_ADD_INS (cfg->cbb, ins);
7491 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7493 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7498 CHECK_STACK_OVF (1);
7501 EMIT_NEW_LOCLOAD (cfg, ins, n);
7505 case CEE_LDLOCA_S: {
7506 unsigned char *tmp_ip;
7508 CHECK_STACK_OVF (1);
7509 CHECK_LOCAL (ip [1]);
7511 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7517 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7526 CHECK_LOCAL (ip [1]);
7527 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7529 emit_stloc_ir (cfg, sp, header, ip [1]);
7534 CHECK_STACK_OVF (1);
7535 EMIT_NEW_PCONST (cfg, ins, NULL);
7536 ins->type = STACK_OBJ;
7541 CHECK_STACK_OVF (1);
7542 EMIT_NEW_ICONST (cfg, ins, -1);
7555 CHECK_STACK_OVF (1);
7556 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7562 CHECK_STACK_OVF (1);
7564 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7570 CHECK_STACK_OVF (1);
7571 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7577 CHECK_STACK_OVF (1);
7578 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7579 ins->type = STACK_I8;
7580 ins->dreg = alloc_dreg (cfg, STACK_I8);
7582 ins->inst_l = (gint64)read64 (ip);
7583 MONO_ADD_INS (bblock, ins);
7589 gboolean use_aotconst = FALSE;
7591 #ifdef TARGET_POWERPC
7592 /* FIXME: Clean this up */
7593 if (cfg->compile_aot)
7594 use_aotconst = TRUE;
7597 /* FIXME: we should really allocate this only late in the compilation process */
7598 f = mono_domain_alloc (cfg->domain, sizeof (float));
7600 CHECK_STACK_OVF (1);
7606 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7608 dreg = alloc_freg (cfg);
7609 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7610 ins->type = STACK_R8;
7612 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7613 ins->type = STACK_R8;
7614 ins->dreg = alloc_dreg (cfg, STACK_R8);
7616 MONO_ADD_INS (bblock, ins);
7626 gboolean use_aotconst = FALSE;
7628 #ifdef TARGET_POWERPC
7629 /* FIXME: Clean this up */
7630 if (cfg->compile_aot)
7631 use_aotconst = TRUE;
7634 /* FIXME: we should really allocate this only late in the compilation process */
7635 d = mono_domain_alloc (cfg->domain, sizeof (double));
7637 CHECK_STACK_OVF (1);
7643 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7645 dreg = alloc_freg (cfg);
7646 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7647 ins->type = STACK_R8;
7649 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7650 ins->type = STACK_R8;
7651 ins->dreg = alloc_dreg (cfg, STACK_R8);
7653 MONO_ADD_INS (bblock, ins);
7662 MonoInst *temp, *store;
7664 CHECK_STACK_OVF (1);
7668 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7669 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7671 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7674 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7687 if (sp [0]->type == STACK_R8)
7688 /* we need to pop the value from the x86 FP stack */
7689 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7695 INLINE_FAILURE ("jmp");
7696 GSHAREDVT_FAILURE (*ip);
7699 if (stack_start != sp)
7701 token = read32 (ip + 1);
7702 /* FIXME: check the signature matches */
7703 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7705 if (!cmethod || mono_loader_get_last_error ())
7708 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7709 GENERIC_SHARING_FAILURE (CEE_JMP);
7711 if (mono_security_cas_enabled ())
7712 CHECK_CFG_EXCEPTION;
7714 if (ARCH_HAVE_OP_TAIL_CALL) {
7715 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7718 /* Handle tail calls similarly to calls */
7719 n = fsig->param_count + fsig->hasthis;
7723 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7724 call->method = cmethod;
7725 call->tail_call = TRUE;
7726 call->signature = mono_method_signature (cmethod);
7727 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7728 call->inst.inst_p0 = cmethod;
7729 for (i = 0; i < n; ++i)
7730 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7732 mono_arch_emit_call (cfg, call);
7733 MONO_ADD_INS (bblock, (MonoInst*)call);
7735 for (i = 0; i < num_args; ++i)
7736 /* Prevent arguments from being optimized away */
7737 arg_array [i]->flags |= MONO_INST_VOLATILE;
7739 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7740 ins = (MonoInst*)call;
7741 ins->inst_p0 = cmethod;
7742 MONO_ADD_INS (bblock, ins);
7746 start_new_bblock = 1;
7751 case CEE_CALLVIRT: {
7752 MonoInst *addr = NULL;
7753 MonoMethodSignature *fsig = NULL;
7755 int virtual = *ip == CEE_CALLVIRT;
7756 int calli = *ip == CEE_CALLI;
7757 gboolean pass_imt_from_rgctx = FALSE;
7758 MonoInst *imt_arg = NULL;
7759 MonoInst *keep_this_alive = NULL;
7760 gboolean pass_vtable = FALSE;
7761 gboolean pass_mrgctx = FALSE;
7762 MonoInst *vtable_arg = NULL;
7763 gboolean check_this = FALSE;
7764 gboolean supported_tail_call = FALSE;
7765 gboolean tail_call = FALSE;
7766 gboolean need_seq_point = FALSE;
7767 guint32 call_opcode = *ip;
7768 gboolean emit_widen = TRUE;
7769 gboolean push_res = TRUE;
7770 gboolean skip_ret = FALSE;
7771 gboolean delegate_invoke = FALSE;
7774 token = read32 (ip + 1);
7779 //GSHAREDVT_FAILURE (*ip);
7784 fsig = mini_get_signature (method, token, generic_context);
7785 n = fsig->param_count + fsig->hasthis;
7787 if (method->dynamic && fsig->pinvoke) {
7791 * This is a call through a function pointer using a pinvoke
7792 * signature. Have to create a wrapper and call that instead.
7793 * FIXME: This is very slow, need to create a wrapper at JIT time
7794 * instead based on the signature.
7796 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7797 EMIT_NEW_PCONST (cfg, args [1], fsig);
7799 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7802 MonoMethod *cil_method;
7804 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7805 cil_method = cmethod;
7807 if (constrained_call) {
7808 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7809 if (cfg->verbose_level > 2)
7810 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7811 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7812 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7813 cfg->generic_sharing_context)) {
7814 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7817 if (cfg->verbose_level > 2)
7818 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7820 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7822 * This is needed since get_method_constrained can't find
7823 * the method in klass representing a type var.
7824 * The type var is guaranteed to be a reference type in this
7827 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7828 g_assert (!cmethod->klass->valuetype);
7830 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7835 if (!cmethod || mono_loader_get_last_error ())
7837 if (!dont_verify && !cfg->skip_visibility) {
7838 MonoMethod *target_method = cil_method;
7839 if (method->is_inflated) {
7840 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7842 if (!mono_method_can_access_method (method_definition, target_method) &&
7843 !mono_method_can_access_method (method, cil_method))
7844 METHOD_ACCESS_FAILURE;
7847 if (mono_security_core_clr_enabled ())
7848 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7850 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7851 /* MS.NET seems to silently convert this to a callvirt */
7856 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7857 * converts to a callvirt.
7859 * tests/bug-515884.il is an example of this behavior
7861 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7862 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7863 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7867 if (!cmethod->klass->inited)
7868 if (!mono_class_init (cmethod->klass))
7869 TYPE_LOAD_ERROR (cmethod->klass);
7871 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7872 mini_class_is_system_array (cmethod->klass)) {
7873 array_rank = cmethod->klass->rank;
7874 fsig = mono_method_signature (cmethod);
7876 fsig = mono_method_signature (cmethod);
7881 if (fsig->pinvoke) {
7882 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7883 check_for_pending_exc, cfg->compile_aot);
7884 fsig = mono_method_signature (wrapper);
7885 } else if (constrained_call) {
7886 fsig = mono_method_signature (cmethod);
7888 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7892 mono_save_token_info (cfg, image, token, cil_method);
7894 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7896 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7897 * foo (bar (), baz ())
7898 * works correctly. MS does this also:
7899 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7900 * The problem with this approach is that the debugger will stop after all calls returning a value,
7901 * even for simple cases, like:
7904 /* Special case a few common successor opcodes */
7905 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7906 need_seq_point = TRUE;
7909 n = fsig->param_count + fsig->hasthis;
7911 /* Don't support calls made using type arguments for now */
7913 if (cfg->gsharedvt) {
7914 if (mini_is_gsharedvt_signature (cfg, fsig))
7915 GSHAREDVT_FAILURE (*ip);
7919 if (mono_security_cas_enabled ()) {
7920 if (check_linkdemand (cfg, method, cmethod))
7921 INLINE_FAILURE ("linkdemand");
7922 CHECK_CFG_EXCEPTION;
7925 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7926 g_assert_not_reached ();
7929 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7932 if (!cfg->generic_sharing_context && cmethod)
7933 g_assert (!mono_method_check_context_used (cmethod));
7937 //g_assert (!virtual || fsig->hasthis);
7941 if (constrained_call) {
7942 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7944 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
7946 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7947 /* The 'Own method' case below */
7948 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7949 /* 'The type parameter is instantiated as a reference type' case below. */
7950 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7951 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7952 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7953 MonoInst *args [16];
7956 * This case handles calls to
7957 * - object:ToString()/Equals()/GetHashCode(),
7958 * - System.IComparable<T>:CompareTo()
7959 * - System.IEquatable<T>:Equals ()
7960 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7964 if (mono_method_check_context_used (cmethod))
7965 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7967 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7968 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7970 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7971 if (fsig->hasthis && fsig->param_count) {
7972 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7973 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7974 ins->dreg = alloc_preg (cfg);
7975 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7976 MONO_ADD_INS (cfg->cbb, ins);
7979 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7982 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7984 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7985 addr_reg = ins->dreg;
7986 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7988 EMIT_NEW_ICONST (cfg, args [3], 0);
7989 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7992 EMIT_NEW_ICONST (cfg, args [3], 0);
7993 EMIT_NEW_ICONST (cfg, args [4], 0);
7995 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7998 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7999 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8000 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
8004 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8005 MONO_ADD_INS (cfg->cbb, add);
8007 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8008 MONO_ADD_INS (cfg->cbb, ins);
8009 /* ins represents the call result */
8014 GSHAREDVT_FAILURE (*ip);
8018 * We have the `constrained.' prefix opcode.
8020 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8022 * The type parameter is instantiated as a valuetype,
8023 * but that type doesn't override the method we're
8024 * calling, so we need to box `this'.
8026 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8027 ins->klass = constrained_call;
8028 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8029 CHECK_CFG_EXCEPTION;
8030 } else if (!constrained_call->valuetype) {
8031 int dreg = alloc_ireg_ref (cfg);
8034 * The type parameter is instantiated as a reference
8035 * type. We have a managed pointer on the stack, so
8036 * we need to dereference it here.
8038 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8039 ins->type = STACK_OBJ;
8042 if (cmethod->klass->valuetype) {
8045 /* Interface method */
8048 mono_class_setup_vtable (constrained_call);
8049 CHECK_TYPELOAD (constrained_call);
8050 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8052 TYPE_LOAD_ERROR (constrained_call);
8053 slot = mono_method_get_vtable_slot (cmethod);
8055 TYPE_LOAD_ERROR (cmethod->klass);
8056 cmethod = constrained_call->vtable [ioffset + slot];
8058 if (cmethod->klass == mono_defaults.enum_class) {
8059 /* Enum implements some interfaces, so treat this as the first case */
8060 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8061 ins->klass = constrained_call;
8062 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8063 CHECK_CFG_EXCEPTION;
8068 constrained_call = NULL;
8071 if (!calli && check_call_signature (cfg, fsig, sp))
8074 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8075 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8076 delegate_invoke = TRUE;
8079 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8081 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8082 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8090 * If the callee is a shared method, then its static cctor
8091 * might not get called after the call was patched.
8093 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8094 emit_generic_class_init (cfg, cmethod->klass);
8095 CHECK_TYPELOAD (cmethod->klass);
8099 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8101 if (cfg->generic_sharing_context && cmethod) {
8102 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8104 context_used = mini_method_check_context_used (cfg, cmethod);
8106 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8107 /* Generic method interface
8108 calls are resolved via a
8109 helper function and don't
8111 if (!cmethod_context || !cmethod_context->method_inst)
8112 pass_imt_from_rgctx = TRUE;
8116 * If a shared method calls another
8117 * shared method then the caller must
8118 * have a generic sharing context
8119 * because the magic trampoline
8120 * requires it. FIXME: We shouldn't
8121 * have to force the vtable/mrgctx
8122 * variable here. Instead there
8123 * should be a flag in the cfg to
8124 * request a generic sharing context.
8127 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8128 mono_get_vtable_var (cfg);
8133 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8135 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8137 CHECK_TYPELOAD (cmethod->klass);
8138 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8143 g_assert (!vtable_arg);
8145 if (!cfg->compile_aot) {
8147 * emit_get_rgctx_method () calls mono_class_vtable () so check
8148 * for type load errors before.
8150 mono_class_setup_vtable (cmethod->klass);
8151 CHECK_TYPELOAD (cmethod->klass);
8154 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8156 /* !marshalbyref is needed to properly handle generic methods + remoting */
8157 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8158 MONO_METHOD_IS_FINAL (cmethod)) &&
8159 !mono_class_is_marshalbyref (cmethod->klass)) {
8166 if (pass_imt_from_rgctx) {
8167 g_assert (!pass_vtable);
8170 imt_arg = emit_get_rgctx_method (cfg, context_used,
8171 cmethod, MONO_RGCTX_INFO_METHOD);
8175 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8177 /* Calling virtual generic methods */
8178 if (cmethod && virtual &&
8179 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8180 !(MONO_METHOD_IS_FINAL (cmethod) &&
8181 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8182 fsig->generic_param_count &&
8183 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8184 MonoInst *this_temp, *this_arg_temp, *store;
8185 MonoInst *iargs [4];
8186 gboolean use_imt = FALSE;
8188 g_assert (fsig->is_inflated);
8190 /* Prevent inlining of methods that contain indirect calls */
8191 INLINE_FAILURE ("virtual generic call");
8193 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8194 GSHAREDVT_FAILURE (*ip);
8196 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8197 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8202 g_assert (!imt_arg);
8204 g_assert (cmethod->is_inflated);
8205 imt_arg = emit_get_rgctx_method (cfg, context_used,
8206 cmethod, MONO_RGCTX_INFO_METHOD);
8207 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8209 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8210 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8211 MONO_ADD_INS (bblock, store);
8213 /* FIXME: This should be a managed pointer */
8214 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8216 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8217 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8218 cmethod, MONO_RGCTX_INFO_METHOD);
8219 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8220 addr = mono_emit_jit_icall (cfg,
8221 mono_helper_compile_generic_method, iargs);
8223 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8225 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8232 * Implement a workaround for the inherent races involved in locking:
8238 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8239 * try block, the Exit () won't be executed, see:
8240 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8241 * To work around this, we extend such try blocks to include the last x bytes
8242 * of the Monitor.Enter () call.
8244 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8245 MonoBasicBlock *tbb;
8247 GET_BBLOCK (cfg, tbb, ip + 5);
8249 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8250 * from Monitor.Enter like ArgumentNullException.
8252 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8253 /* Mark this bblock as needing to be extended */
8254 tbb->extend_try_block = TRUE;
8258 /* Conversion to a JIT intrinsic */
8259 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8261 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8262 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8269 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8270 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8271 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8272 !g_list_find (dont_inline, cmethod)) {
8274 gboolean always = FALSE;
8276 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8277 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8278 /* Prevent inlining of methods that call wrappers */
8279 INLINE_FAILURE ("wrapper call");
8280 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8284 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8286 cfg->real_offset += 5;
8289 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8290 /* *sp is already set by inline_method */
8295 inline_costs += costs;
8301 /* Tail recursion elimination */
8302 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8303 gboolean has_vtargs = FALSE;
8306 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8307 INLINE_FAILURE ("tail call");
8309 /* keep it simple */
8310 for (i = fsig->param_count - 1; i >= 0; i--) {
8311 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8316 for (i = 0; i < n; ++i)
8317 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8318 MONO_INST_NEW (cfg, ins, OP_BR);
8319 MONO_ADD_INS (bblock, ins);
8320 tblock = start_bblock->out_bb [0];
8321 link_bblock (cfg, bblock, tblock);
8322 ins->inst_target_bb = tblock;
8323 start_new_bblock = 1;
8325 /* skip the CEE_RET, too */
8326 if (ip_in_bb (cfg, bblock, ip + 5))
8333 inline_costs += 10 * num_calls++;
8336 * Making generic calls out of gsharedvt methods.
8338 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8339 MonoRgctxInfoType info_type;
8342 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8343 //GSHAREDVT_FAILURE (*ip);
8344 // disable for possible remoting calls
8345 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8346 GSHAREDVT_FAILURE (*ip);
8347 if (fsig->generic_param_count) {
8348 /* virtual generic call */
8349 g_assert (mono_use_imt);
8350 g_assert (!imt_arg);
8351 /* Same as the virtual generic case above */
8352 imt_arg = emit_get_rgctx_method (cfg, context_used,
8353 cmethod, MONO_RGCTX_INFO_METHOD);
8354 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8359 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8360 /* test_0_multi_dim_arrays () in gshared.cs */
8361 GSHAREDVT_FAILURE (*ip);
8363 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8364 keep_this_alive = sp [0];
8366 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8367 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8369 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8370 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8372 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8374 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8376 * We pass the address to the gsharedvt trampoline in the rgctx reg
8378 MonoInst *callee = addr;
8380 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8382 GSHAREDVT_FAILURE (*ip);
8384 addr = emit_get_rgctx_sig (cfg, context_used,
8385 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8386 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8390 /* Generic sharing */
8391 /* FIXME: only do this for generic methods if
8392 they are not shared! */
8393 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8394 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8395 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8396 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8397 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8398 INLINE_FAILURE ("gshared");
8400 g_assert (cfg->generic_sharing_context && cmethod);
8404 * We are compiling a call to a
8405 * generic method from shared code,
8406 * which means that we have to look up
8407 * the method in the rgctx and do an
8411 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8413 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8414 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8418 /* Indirect calls */
8420 if (call_opcode == CEE_CALL)
8421 g_assert (context_used);
8422 else if (call_opcode == CEE_CALLI)
8423 g_assert (!vtable_arg);
8425 /* FIXME: what the hell is this??? */
8426 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8427 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8429 /* Prevent inlining of methods with indirect calls */
8430 INLINE_FAILURE ("indirect call");
8432 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8437 * Instead of emitting an indirect call, emit a direct call
8438 * with the contents of the aotconst as the patch info.
8440 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8441 info_type = addr->inst_c1;
8442 info_data = addr->inst_p0;
8444 info_type = addr->inst_right->inst_c1;
8445 info_data = addr->inst_right->inst_left;
8448 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8449 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8454 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8462 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8463 MonoInst *val = sp [fsig->param_count];
8465 if (val->type == STACK_OBJ) {
8466 MonoInst *iargs [2];
8471 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8474 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8475 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8476 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8477 emit_write_barrier (cfg, addr, val);
8478 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8479 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8481 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8482 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8483 if (!cmethod->klass->element_class->valuetype && !readonly)
8484 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8485 CHECK_TYPELOAD (cmethod->klass);
8488 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8491 g_assert_not_reached ();
8498 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8502 /* Tail prefix / tail call optimization */
8504 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8505 /* FIXME: runtime generic context pointer for jumps? */
8506 /* FIXME: handle this for generic sharing eventually */
8507 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8508 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8509 supported_tail_call = TRUE;
8511 if (supported_tail_call) {
8514 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8515 INLINE_FAILURE ("tail call");
8517 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8519 if (ARCH_HAVE_OP_TAIL_CALL) {
8520 /* Handle tail calls similarly to normal calls */
8523 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8524 call->tail_call = TRUE;
8525 call->method = cmethod;
8526 call->signature = mono_method_signature (cmethod);
8529 * We implement tail calls by storing the actual arguments into the
8530 * argument variables, then emitting a CEE_JMP.
8532 for (i = 0; i < n; ++i) {
8533 /* Prevent argument from being register allocated */
8534 arg_array [i]->flags |= MONO_INST_VOLATILE;
8535 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8537 ins = (MonoInst*)call;
8538 ins->inst_p0 = cmethod;
8539 ins->inst_p1 = arg_array [0];
8540 MONO_ADD_INS (bblock, ins);
8541 link_bblock (cfg, bblock, end_bblock);
8542 start_new_bblock = 1;
8544 // FIXME: Eliminate unreachable epilogs
8547 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8548 * only reachable from this call.
8550 GET_BBLOCK (cfg, tblock, ip + 5);
8551 if (tblock == bblock || tblock->in_count == 0)
8560 * Synchronized wrappers.
8561 * Its hard to determine where to replace a method with its synchronized
8562 * wrapper without causing an infinite recursion. The current solution is
8563 * to add the synchronized wrapper in the trampolines, and to
8564 * change the called method to a dummy wrapper, and resolve that wrapper
8565 * to the real method in mono_jit_compile_method ().
8567 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8568 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8569 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8570 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8574 INLINE_FAILURE ("call");
8575 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8576 imt_arg, vtable_arg);
8579 link_bblock (cfg, bblock, end_bblock);
8580 start_new_bblock = 1;
8582 // FIXME: Eliminate unreachable epilogs
8585 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8586 * only reachable from this call.
8588 GET_BBLOCK (cfg, tblock, ip + 5);
8589 if (tblock == bblock || tblock->in_count == 0)
8596 /* End of call, INS should contain the result of the call, if any */
8598 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8601 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8606 if (keep_this_alive) {
8607 MonoInst *dummy_use;
8609 /* See mono_emit_method_call_full () */
8610 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8613 CHECK_CFG_EXCEPTION;
8617 g_assert (*ip == CEE_RET);
8621 constrained_call = NULL;
8623 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8627 if (cfg->method != method) {
8628 /* return from inlined method */
8630 * If in_count == 0, that means the ret is unreachable due to
8631 * being preceeded by a throw. In that case, inline_method () will
8632 * handle setting the return value
8633 * (test case: test_0_inline_throw ()).
8635 if (return_var && cfg->cbb->in_count) {
8636 MonoType *ret_type = mono_method_signature (method)->ret;
8642 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8645 //g_assert (returnvar != -1);
8646 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8647 cfg->ret_var_set = TRUE;
8650 if (cfg->lmf_var && cfg->cbb->in_count)
8654 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8656 if (seq_points && !sym_seq_points) {
8658 * Place a seq point here too even through the IL stack is not
8659 * empty, so a step over on
8662 * will work correctly.
8664 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8665 MONO_ADD_INS (cfg->cbb, ins);
8668 g_assert (!return_var);
8672 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8675 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8678 if (!cfg->vret_addr) {
8681 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8683 EMIT_NEW_RETLOADA (cfg, ret_addr);
8685 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8686 ins->klass = mono_class_from_mono_type (ret_type);
8689 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8690 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8691 MonoInst *iargs [1];
8695 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8696 mono_arch_emit_setret (cfg, method, conv);
8698 mono_arch_emit_setret (cfg, method, *sp);
8701 mono_arch_emit_setret (cfg, method, *sp);
8706 if (sp != stack_start)
8708 MONO_INST_NEW (cfg, ins, OP_BR);
8710 ins->inst_target_bb = end_bblock;
8711 MONO_ADD_INS (bblock, ins);
8712 link_bblock (cfg, bblock, end_bblock);
8713 start_new_bblock = 1;
8717 MONO_INST_NEW (cfg, ins, OP_BR);
8719 target = ip + 1 + (signed char)(*ip);
8721 GET_BBLOCK (cfg, tblock, target);
8722 link_bblock (cfg, bblock, tblock);
8723 ins->inst_target_bb = tblock;
8724 if (sp != stack_start) {
8725 handle_stack_args (cfg, stack_start, sp - stack_start);
8727 CHECK_UNVERIFIABLE (cfg);
8729 MONO_ADD_INS (bblock, ins);
8730 start_new_bblock = 1;
8731 inline_costs += BRANCH_COST;
8745 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8747 target = ip + 1 + *(signed char*)ip;
8753 inline_costs += BRANCH_COST;
8757 MONO_INST_NEW (cfg, ins, OP_BR);
8760 target = ip + 4 + (gint32)read32(ip);
8762 GET_BBLOCK (cfg, tblock, target);
8763 link_bblock (cfg, bblock, tblock);
8764 ins->inst_target_bb = tblock;
8765 if (sp != stack_start) {
8766 handle_stack_args (cfg, stack_start, sp - stack_start);
8768 CHECK_UNVERIFIABLE (cfg);
8771 MONO_ADD_INS (bblock, ins);
8773 start_new_bblock = 1;
8774 inline_costs += BRANCH_COST;
8781 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8782 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8783 guint32 opsize = is_short ? 1 : 4;
8785 CHECK_OPSIZE (opsize);
8787 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8790 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8795 GET_BBLOCK (cfg, tblock, target);
8796 link_bblock (cfg, bblock, tblock);
8797 GET_BBLOCK (cfg, tblock, ip);
8798 link_bblock (cfg, bblock, tblock);
8800 if (sp != stack_start) {
8801 handle_stack_args (cfg, stack_start, sp - stack_start);
8802 CHECK_UNVERIFIABLE (cfg);
8805 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8806 cmp->sreg1 = sp [0]->dreg;
8807 type_from_op (cmp, sp [0], NULL);
8810 #if SIZEOF_REGISTER == 4
8811 if (cmp->opcode == OP_LCOMPARE_IMM) {
8812 /* Convert it to OP_LCOMPARE */
8813 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8814 ins->type = STACK_I8;
8815 ins->dreg = alloc_dreg (cfg, STACK_I8);
8817 MONO_ADD_INS (bblock, ins);
8818 cmp->opcode = OP_LCOMPARE;
8819 cmp->sreg2 = ins->dreg;
8822 MONO_ADD_INS (bblock, cmp);
8824 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8825 type_from_op (ins, sp [0], NULL);
8826 MONO_ADD_INS (bblock, ins);
8827 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8828 GET_BBLOCK (cfg, tblock, target);
8829 ins->inst_true_bb = tblock;
8830 GET_BBLOCK (cfg, tblock, ip);
8831 ins->inst_false_bb = tblock;
8832 start_new_bblock = 2;
8835 inline_costs += BRANCH_COST;
8850 MONO_INST_NEW (cfg, ins, *ip);
8852 target = ip + 4 + (gint32)read32(ip);
8858 inline_costs += BRANCH_COST;
8862 MonoBasicBlock **targets;
8863 MonoBasicBlock *default_bblock;
8864 MonoJumpInfoBBTable *table;
8865 int offset_reg = alloc_preg (cfg);
8866 int target_reg = alloc_preg (cfg);
8867 int table_reg = alloc_preg (cfg);
8868 int sum_reg = alloc_preg (cfg);
8869 gboolean use_op_switch;
8873 n = read32 (ip + 1);
8876 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8880 CHECK_OPSIZE (n * sizeof (guint32));
8881 target = ip + n * sizeof (guint32);
8883 GET_BBLOCK (cfg, default_bblock, target);
8884 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8886 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8887 for (i = 0; i < n; ++i) {
8888 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8889 targets [i] = tblock;
8890 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8894 if (sp != stack_start) {
8896 * Link the current bb with the targets as well, so handle_stack_args
8897 * will set their in_stack correctly.
8899 link_bblock (cfg, bblock, default_bblock);
8900 for (i = 0; i < n; ++i)
8901 link_bblock (cfg, bblock, targets [i]);
8903 handle_stack_args (cfg, stack_start, sp - stack_start);
8905 CHECK_UNVERIFIABLE (cfg);
8908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8909 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8912 for (i = 0; i < n; ++i)
8913 link_bblock (cfg, bblock, targets [i]);
8915 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8916 table->table = targets;
8917 table->table_size = n;
8919 use_op_switch = FALSE;
8921 /* ARM implements SWITCH statements differently */
8922 /* FIXME: Make it use the generic implementation */
8923 if (!cfg->compile_aot)
8924 use_op_switch = TRUE;
8927 if (COMPILE_LLVM (cfg))
8928 use_op_switch = TRUE;
8930 cfg->cbb->has_jump_table = 1;
8932 if (use_op_switch) {
8933 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8934 ins->sreg1 = src1->dreg;
8935 ins->inst_p0 = table;
8936 ins->inst_many_bb = targets;
8937 ins->klass = GUINT_TO_POINTER (n);
8938 MONO_ADD_INS (cfg->cbb, ins);
8940 if (sizeof (gpointer) == 8)
8941 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8945 #if SIZEOF_REGISTER == 8
8946 /* The upper word might not be zero, and we add it to a 64 bit address later */
8947 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8950 if (cfg->compile_aot) {
8951 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8953 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8954 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8955 ins->inst_p0 = table;
8956 ins->dreg = table_reg;
8957 MONO_ADD_INS (cfg->cbb, ins);
8960 /* FIXME: Use load_memindex */
8961 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8963 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8965 start_new_bblock = 1;
8966 inline_costs += (BRANCH_COST * 2);
8986 dreg = alloc_freg (cfg);
8989 dreg = alloc_lreg (cfg);
8992 dreg = alloc_ireg_ref (cfg);
8995 dreg = alloc_preg (cfg);
8998 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8999 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9000 ins->flags |= ins_flag;
9002 MONO_ADD_INS (bblock, ins);
9004 if (ins->flags & MONO_INST_VOLATILE) {
9005 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9006 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9007 emit_memory_barrier (cfg, FullBarrier);
9022 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9023 ins->flags |= ins_flag;
9026 if (ins->flags & MONO_INST_VOLATILE) {
9027 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9028 /* FIXME it's questionable if release semantics require full barrier or just StoreStore */
9029 emit_memory_barrier (cfg, FullBarrier);
9032 MONO_ADD_INS (bblock, ins);
9034 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9035 emit_write_barrier (cfg, sp [0], sp [1]);
9044 MONO_INST_NEW (cfg, ins, (*ip));
9046 ins->sreg1 = sp [0]->dreg;
9047 ins->sreg2 = sp [1]->dreg;
9048 type_from_op (ins, sp [0], sp [1]);
9050 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9052 /* Use the immediate opcodes if possible */
9053 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9054 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9055 if (imm_opcode != -1) {
9056 ins->opcode = imm_opcode;
9057 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9060 sp [1]->opcode = OP_NOP;
9064 MONO_ADD_INS ((cfg)->cbb, (ins));
9066 *sp++ = mono_decompose_opcode (cfg, ins);
9083 MONO_INST_NEW (cfg, ins, (*ip));
9085 ins->sreg1 = sp [0]->dreg;
9086 ins->sreg2 = sp [1]->dreg;
9087 type_from_op (ins, sp [0], sp [1]);
9089 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9090 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9092 /* FIXME: Pass opcode to is_inst_imm */
9094 /* Use the immediate opcodes if possible */
9095 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9098 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9099 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9100 /* Keep emulated opcodes which are optimized away later */
9101 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9102 imm_opcode = mono_op_to_op_imm (ins->opcode);
9105 if (imm_opcode != -1) {
9106 ins->opcode = imm_opcode;
9107 if (sp [1]->opcode == OP_I8CONST) {
9108 #if SIZEOF_REGISTER == 8
9109 ins->inst_imm = sp [1]->inst_l;
9111 ins->inst_ls_word = sp [1]->inst_ls_word;
9112 ins->inst_ms_word = sp [1]->inst_ms_word;
9116 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9119 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9120 if (sp [1]->next == NULL)
9121 sp [1]->opcode = OP_NOP;
9124 MONO_ADD_INS ((cfg)->cbb, (ins));
9126 *sp++ = mono_decompose_opcode (cfg, ins);
9139 case CEE_CONV_OVF_I8:
9140 case CEE_CONV_OVF_U8:
9144 /* Special case this earlier so we have long constants in the IR */
9145 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9146 int data = sp [-1]->inst_c0;
9147 sp [-1]->opcode = OP_I8CONST;
9148 sp [-1]->type = STACK_I8;
9149 #if SIZEOF_REGISTER == 8
9150 if ((*ip) == CEE_CONV_U8)
9151 sp [-1]->inst_c0 = (guint32)data;
9153 sp [-1]->inst_c0 = data;
9155 sp [-1]->inst_ls_word = data;
9156 if ((*ip) == CEE_CONV_U8)
9157 sp [-1]->inst_ms_word = 0;
9159 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9161 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9168 case CEE_CONV_OVF_I4:
9169 case CEE_CONV_OVF_I1:
9170 case CEE_CONV_OVF_I2:
9171 case CEE_CONV_OVF_I:
9172 case CEE_CONV_OVF_U:
9175 if (sp [-1]->type == STACK_R8) {
9176 ADD_UNOP (CEE_CONV_OVF_I8);
9183 case CEE_CONV_OVF_U1:
9184 case CEE_CONV_OVF_U2:
9185 case CEE_CONV_OVF_U4:
9188 if (sp [-1]->type == STACK_R8) {
9189 ADD_UNOP (CEE_CONV_OVF_U8);
9196 case CEE_CONV_OVF_I1_UN:
9197 case CEE_CONV_OVF_I2_UN:
9198 case CEE_CONV_OVF_I4_UN:
9199 case CEE_CONV_OVF_I8_UN:
9200 case CEE_CONV_OVF_U1_UN:
9201 case CEE_CONV_OVF_U2_UN:
9202 case CEE_CONV_OVF_U4_UN:
9203 case CEE_CONV_OVF_U8_UN:
9204 case CEE_CONV_OVF_I_UN:
9205 case CEE_CONV_OVF_U_UN:
9212 CHECK_CFG_EXCEPTION;
9216 case CEE_ADD_OVF_UN:
9218 case CEE_MUL_OVF_UN:
9220 case CEE_SUB_OVF_UN:
9226 GSHAREDVT_FAILURE (*ip);
9229 token = read32 (ip + 1);
9230 klass = mini_get_class (method, token, generic_context);
9231 CHECK_TYPELOAD (klass);
9233 if (generic_class_is_reference_type (cfg, klass)) {
9234 MonoInst *store, *load;
9235 int dreg = alloc_ireg_ref (cfg);
9237 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9238 load->flags |= ins_flag;
9239 MONO_ADD_INS (cfg->cbb, load);
9241 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9242 store->flags |= ins_flag;
9243 MONO_ADD_INS (cfg->cbb, store);
9245 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9246 emit_write_barrier (cfg, sp [0], sp [1]);
9248 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9260 token = read32 (ip + 1);
9261 klass = mini_get_class (method, token, generic_context);
9262 CHECK_TYPELOAD (klass);
9264 /* Optimize the common ldobj+stloc combination */
9274 loc_index = ip [5] - CEE_STLOC_0;
9281 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9282 CHECK_LOCAL (loc_index);
9284 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9285 ins->dreg = cfg->locals [loc_index]->dreg;
9291 /* Optimize the ldobj+stobj combination */
9292 /* The reference case ends up being a load+store anyway */
9293 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9298 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9305 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9314 CHECK_STACK_OVF (1);
9316 n = read32 (ip + 1);
9318 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9319 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9320 ins->type = STACK_OBJ;
9323 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9324 MonoInst *iargs [1];
9326 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9327 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9329 if (cfg->opt & MONO_OPT_SHARED) {
9330 MonoInst *iargs [3];
9332 if (cfg->compile_aot) {
9333 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9335 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9336 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9337 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9338 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9339 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9341 if (bblock->out_of_line) {
9342 MonoInst *iargs [2];
9344 if (image == mono_defaults.corlib) {
9346 * Avoid relocations in AOT and save some space by using a
9347 * version of helper_ldstr specialized to mscorlib.
9349 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9350 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9352 /* Avoid creating the string object */
9353 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9354 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9355 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9359 if (cfg->compile_aot) {
9360 NEW_LDSTRCONST (cfg, ins, image, n);
9362 MONO_ADD_INS (bblock, ins);
9365 NEW_PCONST (cfg, ins, NULL);
9366 ins->type = STACK_OBJ;
9367 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9369 OUT_OF_MEMORY_FAILURE;
9372 MONO_ADD_INS (bblock, ins);
9381 MonoInst *iargs [2];
9382 MonoMethodSignature *fsig;
9385 MonoInst *vtable_arg = NULL;
9388 token = read32 (ip + 1);
9389 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9390 if (!cmethod || mono_loader_get_last_error ())
9392 fsig = mono_method_get_signature (cmethod, image, token);
9396 mono_save_token_info (cfg, image, token, cmethod);
9398 if (!mono_class_init (cmethod->klass))
9399 TYPE_LOAD_ERROR (cmethod->klass);
9401 context_used = mini_method_check_context_used (cfg, cmethod);
9403 if (mono_security_cas_enabled ()) {
9404 if (check_linkdemand (cfg, method, cmethod))
9405 INLINE_FAILURE ("linkdemand");
9406 CHECK_CFG_EXCEPTION;
9407 } else if (mono_security_core_clr_enabled ()) {
9408 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9411 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9412 emit_generic_class_init (cfg, cmethod->klass);
9413 CHECK_TYPELOAD (cmethod->klass);
9417 if (cfg->gsharedvt) {
9418 if (mini_is_gsharedvt_variable_signature (sig))
9419 GSHAREDVT_FAILURE (*ip);
9423 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9424 mono_method_is_generic_sharable (cmethod, TRUE)) {
9425 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9426 mono_class_vtable (cfg->domain, cmethod->klass);
9427 CHECK_TYPELOAD (cmethod->klass);
9429 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9430 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9433 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9434 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9436 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9438 CHECK_TYPELOAD (cmethod->klass);
9439 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9444 n = fsig->param_count;
9448 * Generate smaller code for the common newobj <exception> instruction in
9449 * argument checking code.
9451 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9452 is_exception_class (cmethod->klass) && n <= 2 &&
9453 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9454 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9455 MonoInst *iargs [3];
9457 g_assert (!vtable_arg);
9461 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9464 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9468 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9473 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9476 g_assert_not_reached ();
9484 /* move the args to allow room for 'this' in the first position */
9490 /* check_call_signature () requires sp[0] to be set */
9491 this_ins.type = STACK_OBJ;
9493 if (check_call_signature (cfg, fsig, sp))
9498 if (mini_class_is_system_array (cmethod->klass)) {
9499 g_assert (!vtable_arg);
9501 *sp = emit_get_rgctx_method (cfg, context_used,
9502 cmethod, MONO_RGCTX_INFO_METHOD);
9504 /* Avoid varargs in the common case */
9505 if (fsig->param_count == 1)
9506 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9507 else if (fsig->param_count == 2)
9508 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9509 else if (fsig->param_count == 3)
9510 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9511 else if (fsig->param_count == 4)
9512 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9514 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9515 } else if (cmethod->string_ctor) {
9516 g_assert (!context_used);
9517 g_assert (!vtable_arg);
9518 /* we simply pass a null pointer */
9519 EMIT_NEW_PCONST (cfg, *sp, NULL);
9520 /* now call the string ctor */
9521 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9523 MonoInst* callvirt_this_arg = NULL;
9525 if (cmethod->klass->valuetype) {
9526 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9527 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9528 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9533 * The code generated by mini_emit_virtual_call () expects
9534 * iargs [0] to be a boxed instance, but luckily the vcall
9535 * will be transformed into a normal call there.
9537 } else if (context_used) {
9538 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9541 MonoVTable *vtable = NULL;
9543 if (!cfg->compile_aot)
9544 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9545 CHECK_TYPELOAD (cmethod->klass);
9548 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9549 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9550 * As a workaround, we call class cctors before allocating objects.
9552 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9553 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9554 if (cfg->verbose_level > 2)
9555 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9556 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9559 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9562 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9565 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9567 /* Now call the actual ctor */
9568 /* Avoid virtual calls to ctors if possible */
9569 if (mono_class_is_marshalbyref (cmethod->klass))
9570 callvirt_this_arg = sp [0];
9573 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9574 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9575 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9580 CHECK_CFG_EXCEPTION;
9581 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9582 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9583 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9584 !g_list_find (dont_inline, cmethod)) {
9587 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9588 cfg->real_offset += 5;
9591 inline_costs += costs - 5;
9593 INLINE_FAILURE ("inline failure");
9594 // FIXME-VT: Clean this up
9595 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9596 GSHAREDVT_FAILURE(*ip);
9597 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9599 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9602 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9603 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9604 } else if (context_used &&
9605 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9606 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9607 MonoInst *cmethod_addr;
9609 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9610 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9612 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9614 INLINE_FAILURE ("ctor call");
9615 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9616 callvirt_this_arg, NULL, vtable_arg);
9620 if (alloc == NULL) {
9622 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9623 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9637 token = read32 (ip + 1);
9638 klass = mini_get_class (method, token, generic_context);
9639 CHECK_TYPELOAD (klass);
9640 if (sp [0]->type != STACK_OBJ)
9643 context_used = mini_class_check_context_used (cfg, klass);
9645 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9652 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9655 if (cfg->compile_aot)
9656 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9658 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9660 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9662 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9665 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9666 MonoMethod *mono_castclass;
9667 MonoInst *iargs [1];
9670 mono_castclass = mono_marshal_get_castclass (klass);
9673 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9674 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9675 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9676 reset_cast_details (cfg);
9677 CHECK_CFG_EXCEPTION;
9678 g_assert (costs > 0);
9681 cfg->real_offset += 5;
9686 inline_costs += costs;
9689 ins = handle_castclass (cfg, klass, *sp, context_used);
9690 CHECK_CFG_EXCEPTION;
9700 token = read32 (ip + 1);
9701 klass = mini_get_class (method, token, generic_context);
9702 CHECK_TYPELOAD (klass);
9703 if (sp [0]->type != STACK_OBJ)
9706 context_used = mini_class_check_context_used (cfg, klass);
9708 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9709 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9716 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9719 if (cfg->compile_aot)
9720 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9722 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9724 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9727 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9728 MonoMethod *mono_isinst;
9729 MonoInst *iargs [1];
9732 mono_isinst = mono_marshal_get_isinst (klass);
9735 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9736 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9737 CHECK_CFG_EXCEPTION;
9738 g_assert (costs > 0);
9741 cfg->real_offset += 5;
9746 inline_costs += costs;
9749 ins = handle_isinst (cfg, klass, *sp, context_used);
9750 CHECK_CFG_EXCEPTION;
9757 case CEE_UNBOX_ANY: {
9761 token = read32 (ip + 1);
9762 klass = mini_get_class (method, token, generic_context);
9763 CHECK_TYPELOAD (klass);
9765 mono_save_token_info (cfg, image, token, klass);
9767 context_used = mini_class_check_context_used (cfg, klass);
9769 if (mini_is_gsharedvt_klass (cfg, klass)) {
9770 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9778 if (generic_class_is_reference_type (cfg, klass)) {
9779 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9780 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9787 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9790 /*FIXME AOT support*/
9791 if (cfg->compile_aot)
9792 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9794 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9796 /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
9797 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9800 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9801 MonoMethod *mono_castclass;
9802 MonoInst *iargs [1];
9805 mono_castclass = mono_marshal_get_castclass (klass);
9808 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9809 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9810 CHECK_CFG_EXCEPTION;
9811 g_assert (costs > 0);
9814 cfg->real_offset += 5;
9818 inline_costs += costs;
9820 ins = handle_castclass (cfg, klass, *sp, context_used);
9821 CHECK_CFG_EXCEPTION;
9829 if (mono_class_is_nullable (klass)) {
9830 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9837 ins = handle_unbox (cfg, klass, sp, context_used);
9843 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9856 token = read32 (ip + 1);
9857 klass = mini_get_class (method, token, generic_context);
9858 CHECK_TYPELOAD (klass);
9860 mono_save_token_info (cfg, image, token, klass);
9862 context_used = mini_class_check_context_used (cfg, klass);
9864 if (generic_class_is_reference_type (cfg, klass)) {
9870 if (klass == mono_defaults.void_class)
9872 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9874 /* frequent check in generic code: box (struct), brtrue */
9876 // FIXME: LLVM can't handle the inconsistent bb linking
9877 if (!mono_class_is_nullable (klass) &&
9878 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9879 (ip [5] == CEE_BRTRUE ||
9880 ip [5] == CEE_BRTRUE_S ||
9881 ip [5] == CEE_BRFALSE ||
9882 ip [5] == CEE_BRFALSE_S)) {
9883 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9885 MonoBasicBlock *true_bb, *false_bb;
9889 if (cfg->verbose_level > 3) {
9890 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9891 printf ("<box+brtrue opt>\n");
9899 target = ip + 1 + (signed char)(*ip);
9906 target = ip + 4 + (gint)(read32 (ip));
9910 g_assert_not_reached ();
9914 * We need to link both bblocks, since it is needed for handling stack
9915 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9916 * Branching to only one of them would lead to inconsistencies, so
9917 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9919 GET_BBLOCK (cfg, true_bb, target);
9920 GET_BBLOCK (cfg, false_bb, ip);
9922 mono_link_bblock (cfg, cfg->cbb, true_bb);
9923 mono_link_bblock (cfg, cfg->cbb, false_bb);
9925 if (sp != stack_start) {
9926 handle_stack_args (cfg, stack_start, sp - stack_start);
9928 CHECK_UNVERIFIABLE (cfg);
9931 if (COMPILE_LLVM (cfg)) {
9932 dreg = alloc_ireg (cfg);
9933 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9934 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9936 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9938 /* The JIT can't eliminate the iconst+compare */
9939 MONO_INST_NEW (cfg, ins, OP_BR);
9940 ins->inst_target_bb = is_true ? true_bb : false_bb;
9941 MONO_ADD_INS (cfg->cbb, ins);
9944 start_new_bblock = 1;
9948 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9950 CHECK_CFG_EXCEPTION;
9959 token = read32 (ip + 1);
9960 klass = mini_get_class (method, token, generic_context);
9961 CHECK_TYPELOAD (klass);
9963 mono_save_token_info (cfg, image, token, klass);
9965 context_used = mini_class_check_context_used (cfg, klass);
9967 if (mono_class_is_nullable (klass)) {
9970 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9971 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9975 ins = handle_unbox (cfg, klass, sp, context_used);
9988 MonoClassField *field;
9989 #ifndef DISABLE_REMOTING
9993 gboolean is_instance;
9995 gpointer addr = NULL;
9996 gboolean is_special_static;
9998 MonoInst *store_val = NULL;
9999 MonoInst *thread_ins;
10002 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10004 if (op == CEE_STFLD) {
10007 store_val = sp [1];
10012 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10014 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10017 if (op == CEE_STSFLD) {
10020 store_val = sp [0];
10025 token = read32 (ip + 1);
10026 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10027 field = mono_method_get_wrapper_data (method, token);
10028 klass = field->parent;
10031 field = mono_field_from_token (image, token, &klass, generic_context);
10035 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10036 FIELD_ACCESS_FAILURE;
10037 mono_class_init (klass);
10039 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10042 /* if the class is Critical then transparent code cannot access it's fields */
10043 if (!is_instance && mono_security_core_clr_enabled ())
10044 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10046 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
10047 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10048 if (mono_security_core_clr_enabled ())
10049 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10053 * LDFLD etc. is usable on static fields as well, so convert those cases to
10056 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10068 g_assert_not_reached ();
10070 is_instance = FALSE;
10073 context_used = mini_class_check_context_used (cfg, klass);
10075 /* INSTANCE CASE */
10077 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10078 if (op == CEE_STFLD) {
10079 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10081 #ifndef DISABLE_REMOTING
10082 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10083 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10084 MonoInst *iargs [5];
10086 GSHAREDVT_FAILURE (op);
10088 iargs [0] = sp [0];
10089 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10090 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10091 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10093 iargs [4] = sp [1];
10095 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10096 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10097 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10098 CHECK_CFG_EXCEPTION;
10099 g_assert (costs > 0);
10101 cfg->real_offset += 5;
10104 inline_costs += costs;
10106 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10113 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10115 if (mini_is_gsharedvt_klass (cfg, klass)) {
10116 MonoInst *offset_ins;
10118 context_used = mini_class_check_context_used (cfg, klass);
10120 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10121 dreg = alloc_ireg_mp (cfg);
10122 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10123 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10124 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10126 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10128 if (sp [0]->opcode != OP_LDADDR)
10129 store->flags |= MONO_INST_FAULT;
10131 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10132 /* insert call to write barrier */
10136 dreg = alloc_ireg_mp (cfg);
10137 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10138 emit_write_barrier (cfg, ptr, sp [1]);
10141 store->flags |= ins_flag;
10148 #ifndef DISABLE_REMOTING
10149 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10150 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10151 MonoInst *iargs [4];
10153 GSHAREDVT_FAILURE (op);
10155 iargs [0] = sp [0];
10156 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10157 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10158 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10159 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10160 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10161 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10162 CHECK_CFG_EXCEPTION;
10164 g_assert (costs > 0);
10166 cfg->real_offset += 5;
10170 inline_costs += costs;
10172 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10178 if (sp [0]->type == STACK_VTYPE) {
10181 /* Have to compute the address of the variable */
10183 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10185 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10187 g_assert (var->klass == klass);
10189 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10193 if (op == CEE_LDFLDA) {
10194 if (is_magic_tls_access (field)) {
10195 GSHAREDVT_FAILURE (*ip);
10197 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10199 if (sp [0]->type == STACK_OBJ) {
10200 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10201 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10204 dreg = alloc_ireg_mp (cfg);
10206 if (mini_is_gsharedvt_klass (cfg, klass)) {
10207 MonoInst *offset_ins;
10209 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10210 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10212 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10214 ins->klass = mono_class_from_mono_type (field->type);
10215 ins->type = STACK_MP;
10221 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10223 if (mini_is_gsharedvt_klass (cfg, klass)) {
10224 MonoInst *offset_ins;
10226 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10227 dreg = alloc_ireg_mp (cfg);
10228 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10229 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10231 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10233 load->flags |= ins_flag;
10234 if (sp [0]->opcode != OP_LDADDR)
10235 load->flags |= MONO_INST_FAULT;
10249 * We can only support shared generic static
10250 * field access on architectures where the
10251 * trampoline code has been extended to handle
10252 * the generic class init.
10254 #ifndef MONO_ARCH_VTABLE_REG
10255 GENERIC_SHARING_FAILURE (op);
10258 context_used = mini_class_check_context_used (cfg, klass);
10260 ftype = mono_field_get_type (field);
10262 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10265 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10266 * to be called here.
10268 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10269 mono_class_vtable (cfg->domain, klass);
10270 CHECK_TYPELOAD (klass);
10272 mono_domain_lock (cfg->domain);
10273 if (cfg->domain->special_static_fields)
10274 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10275 mono_domain_unlock (cfg->domain);
10277 is_special_static = mono_class_field_is_special_static (field);
10279 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10280 thread_ins = mono_get_thread_intrinsic (cfg);
10284 /* Generate IR to compute the field address */
10285 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10287 * Fast access to TLS data
10288 * Inline version of get_thread_static_data () in
10292 int idx, static_data_reg, array_reg, dreg;
10294 GSHAREDVT_FAILURE (op);
10296 // offset &= 0x7fffffff;
10297 // idx = (offset >> 24) - 1;
10298 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10299 MONO_ADD_INS (cfg->cbb, thread_ins);
10300 static_data_reg = alloc_ireg (cfg);
10301 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10303 if (cfg->compile_aot) {
10304 int offset_reg, offset2_reg, idx_reg;
10306 /* For TLS variables, this will return the TLS offset */
10307 EMIT_NEW_SFLDACONST (cfg, ins, field);
10308 offset_reg = ins->dreg;
10309 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10310 idx_reg = alloc_ireg (cfg);
10311 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10312 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10313 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10314 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10315 array_reg = alloc_ireg (cfg);
10316 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10317 offset2_reg = alloc_ireg (cfg);
10318 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10319 dreg = alloc_ireg (cfg);
10320 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10322 offset = (gsize)addr & 0x7fffffff;
10323 idx = (offset >> 24) - 1;
10325 array_reg = alloc_ireg (cfg);
10326 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10327 dreg = alloc_ireg (cfg);
10328 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10330 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10331 (cfg->compile_aot && is_special_static) ||
10332 (context_used && is_special_static)) {
10333 MonoInst *iargs [2];
10335 g_assert (field->parent);
10336 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10337 if (context_used) {
10338 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10339 field, MONO_RGCTX_INFO_CLASS_FIELD);
10341 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10343 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10344 } else if (context_used) {
10345 MonoInst *static_data;
10348 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10349 method->klass->name_space, method->klass->name, method->name,
10350 depth, field->offset);
10353 if (mono_class_needs_cctor_run (klass, method))
10354 emit_generic_class_init (cfg, klass);
10357 * The pointer we're computing here is
10359 * super_info.static_data + field->offset
10361 static_data = emit_get_rgctx_klass (cfg, context_used,
10362 klass, MONO_RGCTX_INFO_STATIC_DATA);
10364 if (mini_is_gsharedvt_klass (cfg, klass)) {
10365 MonoInst *offset_ins;
10367 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10368 dreg = alloc_ireg_mp (cfg);
10369 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10370 } else if (field->offset == 0) {
10373 int addr_reg = mono_alloc_preg (cfg);
10374 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10376 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10377 MonoInst *iargs [2];
10379 g_assert (field->parent);
10380 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10381 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10382 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10384 MonoVTable *vtable = NULL;
10386 if (!cfg->compile_aot)
10387 vtable = mono_class_vtable (cfg->domain, klass);
10388 CHECK_TYPELOAD (klass);
10391 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10392 if (!(g_slist_find (class_inits, klass))) {
10393 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10394 if (cfg->verbose_level > 2)
10395 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10396 class_inits = g_slist_prepend (class_inits, klass);
10399 if (cfg->run_cctors) {
10401 /* This makes so that inline cannot trigger */
10402 /* .cctors: too many apps depend on them */
10403 /* running with a specific order... */
10405 if (! vtable->initialized)
10406 INLINE_FAILURE ("class init");
10407 ex = mono_runtime_class_init_full (vtable, FALSE);
10409 set_exception_object (cfg, ex);
10410 goto exception_exit;
10414 if (cfg->compile_aot)
10415 EMIT_NEW_SFLDACONST (cfg, ins, field);
10418 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10420 EMIT_NEW_PCONST (cfg, ins, addr);
10423 MonoInst *iargs [1];
10424 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10425 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10429 /* Generate IR to do the actual load/store operation */
10431 if (op == CEE_LDSFLDA) {
10432 ins->klass = mono_class_from_mono_type (ftype);
10433 ins->type = STACK_PTR;
10435 } else if (op == CEE_STSFLD) {
10438 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10439 store->flags |= ins_flag;
10441 gboolean is_const = FALSE;
10442 MonoVTable *vtable = NULL;
10443 gpointer addr = NULL;
10445 if (!context_used) {
10446 vtable = mono_class_vtable (cfg->domain, klass);
10447 CHECK_TYPELOAD (klass);
10449 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10450 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10451 int ro_type = ftype->type;
10453 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10454 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10455 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10458 GSHAREDVT_FAILURE (op);
10460 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10463 case MONO_TYPE_BOOLEAN:
10465 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10469 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10472 case MONO_TYPE_CHAR:
10474 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10478 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10483 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10487 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10492 case MONO_TYPE_PTR:
10493 case MONO_TYPE_FNPTR:
10494 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10495 type_to_eval_stack_type ((cfg), field->type, *sp);
10498 case MONO_TYPE_STRING:
10499 case MONO_TYPE_OBJECT:
10500 case MONO_TYPE_CLASS:
10501 case MONO_TYPE_SZARRAY:
10502 case MONO_TYPE_ARRAY:
10503 if (!mono_gc_is_moving ()) {
10504 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10505 type_to_eval_stack_type ((cfg), field->type, *sp);
10513 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10518 case MONO_TYPE_VALUETYPE:
10528 CHECK_STACK_OVF (1);
10530 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10531 load->flags |= ins_flag;
10544 token = read32 (ip + 1);
10545 klass = mini_get_class (method, token, generic_context);
10546 CHECK_TYPELOAD (klass);
10547 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10548 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10549 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10550 generic_class_is_reference_type (cfg, klass)) {
10551 /* insert call to write barrier */
10552 emit_write_barrier (cfg, sp [0], sp [1]);
10564 const char *data_ptr;
10566 guint32 field_token;
10572 token = read32 (ip + 1);
10574 klass = mini_get_class (method, token, generic_context);
10575 CHECK_TYPELOAD (klass);
10577 context_used = mini_class_check_context_used (cfg, klass);
10579 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10580 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10581 ins->sreg1 = sp [0]->dreg;
10582 ins->type = STACK_I4;
10583 ins->dreg = alloc_ireg (cfg);
10584 MONO_ADD_INS (cfg->cbb, ins);
10585 *sp = mono_decompose_opcode (cfg, ins);
10588 if (context_used) {
10589 MonoInst *args [3];
10590 MonoClass *array_class = mono_array_class_get (klass, 1);
10591 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10593 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10596 args [0] = emit_get_rgctx_klass (cfg, context_used,
10597 array_class, MONO_RGCTX_INFO_VTABLE);
10602 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10604 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10606 if (cfg->opt & MONO_OPT_SHARED) {
10607 /* Decompose now to avoid problems with references to the domainvar */
10608 MonoInst *iargs [3];
10610 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10611 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10612 iargs [2] = sp [0];
10614 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10616 /* Decompose later since it is needed by abcrem */
10617 MonoClass *array_type = mono_array_class_get (klass, 1);
10618 mono_class_vtable (cfg->domain, array_type);
10619 CHECK_TYPELOAD (array_type);
10621 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10622 ins->dreg = alloc_ireg_ref (cfg);
10623 ins->sreg1 = sp [0]->dreg;
10624 ins->inst_newa_class = klass;
10625 ins->type = STACK_OBJ;
10626 ins->klass = array_type;
10627 MONO_ADD_INS (cfg->cbb, ins);
10628 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10629 cfg->cbb->has_array_access = TRUE;
10631 /* Needed so mono_emit_load_get_addr () gets called */
10632 mono_get_got_var (cfg);
10642 * we inline/optimize the initialization sequence if possible.
10643 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10644 * for small sizes open code the memcpy
10645 * ensure the rva field is big enough
10647 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10648 MonoMethod *memcpy_method = get_memcpy_method ();
10649 MonoInst *iargs [3];
10650 int add_reg = alloc_ireg_mp (cfg);
10652 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10653 if (cfg->compile_aot) {
10654 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10656 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10658 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10659 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10668 if (sp [0]->type != STACK_OBJ)
10671 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10672 ins->dreg = alloc_preg (cfg);
10673 ins->sreg1 = sp [0]->dreg;
10674 ins->type = STACK_I4;
10675 /* This flag will be inherited by the decomposition */
10676 ins->flags |= MONO_INST_FAULT;
10677 MONO_ADD_INS (cfg->cbb, ins);
10678 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10679 cfg->cbb->has_array_access = TRUE;
10687 if (sp [0]->type != STACK_OBJ)
10690 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10692 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10693 CHECK_TYPELOAD (klass);
10694 /* we need to make sure that this array is exactly the type it needs
10695 * to be for correctness. the wrappers are lax with their usage
10696 * so we need to ignore them here
10698 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10699 MonoClass *array_class = mono_array_class_get (klass, 1);
10700 mini_emit_check_array_type (cfg, sp [0], array_class);
10701 CHECK_TYPELOAD (array_class);
10705 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10710 case CEE_LDELEM_I1:
10711 case CEE_LDELEM_U1:
10712 case CEE_LDELEM_I2:
10713 case CEE_LDELEM_U2:
10714 case CEE_LDELEM_I4:
10715 case CEE_LDELEM_U4:
10716 case CEE_LDELEM_I8:
10718 case CEE_LDELEM_R4:
10719 case CEE_LDELEM_R8:
10720 case CEE_LDELEM_REF: {
10726 if (*ip == CEE_LDELEM) {
10728 token = read32 (ip + 1);
10729 klass = mini_get_class (method, token, generic_context);
10730 CHECK_TYPELOAD (klass);
10731 mono_class_init (klass);
10734 klass = array_access_to_klass (*ip);
10736 if (sp [0]->type != STACK_OBJ)
10739 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10741 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10742 // FIXME-VT: OP_ICONST optimization
10743 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10744 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10745 ins->opcode = OP_LOADV_MEMBASE;
10746 } else if (sp [1]->opcode == OP_ICONST) {
10747 int array_reg = sp [0]->dreg;
10748 int index_reg = sp [1]->dreg;
10749 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10751 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10752 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10754 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10755 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10758 if (*ip == CEE_LDELEM)
10765 case CEE_STELEM_I1:
10766 case CEE_STELEM_I2:
10767 case CEE_STELEM_I4:
10768 case CEE_STELEM_I8:
10769 case CEE_STELEM_R4:
10770 case CEE_STELEM_R8:
10771 case CEE_STELEM_REF:
10776 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10778 if (*ip == CEE_STELEM) {
10780 token = read32 (ip + 1);
10781 klass = mini_get_class (method, token, generic_context);
10782 CHECK_TYPELOAD (klass);
10783 mono_class_init (klass);
10786 klass = array_access_to_klass (*ip);
10788 if (sp [0]->type != STACK_OBJ)
10791 emit_array_store (cfg, klass, sp, TRUE);
10793 if (*ip == CEE_STELEM)
10800 case CEE_CKFINITE: {
10804 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10805 ins->sreg1 = sp [0]->dreg;
10806 ins->dreg = alloc_freg (cfg);
10807 ins->type = STACK_R8;
10808 MONO_ADD_INS (bblock, ins);
10810 *sp++ = mono_decompose_opcode (cfg, ins);
10815 case CEE_REFANYVAL: {
10816 MonoInst *src_var, *src;
10818 int klass_reg = alloc_preg (cfg);
10819 int dreg = alloc_preg (cfg);
10821 GSHAREDVT_FAILURE (*ip);
10824 MONO_INST_NEW (cfg, ins, *ip);
10827 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10828 CHECK_TYPELOAD (klass);
10829 mono_class_init (klass);
10831 context_used = mini_class_check_context_used (cfg, klass);
10834 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10836 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10837 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10838 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10840 if (context_used) {
10841 MonoInst *klass_ins;
10843 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10844 klass, MONO_RGCTX_INFO_KLASS);
10847 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10848 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10850 mini_emit_class_check (cfg, klass_reg, klass);
10852 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10853 ins->type = STACK_MP;
10858 case CEE_MKREFANY: {
10859 MonoInst *loc, *addr;
10861 GSHAREDVT_FAILURE (*ip);
10864 MONO_INST_NEW (cfg, ins, *ip);
10867 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10868 CHECK_TYPELOAD (klass);
10869 mono_class_init (klass);
10871 context_used = mini_class_check_context_used (cfg, klass);
10873 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10874 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10876 if (context_used) {
10877 MonoInst *const_ins;
10878 int type_reg = alloc_preg (cfg);
10880 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10881 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10882 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10884 } else if (cfg->compile_aot) {
10885 int const_reg = alloc_preg (cfg);
10886 int type_reg = alloc_preg (cfg);
10888 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10889 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10890 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10893 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10894 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10898 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10899 ins->type = STACK_VTYPE;
10900 ins->klass = mono_defaults.typed_reference_class;
10905 case CEE_LDTOKEN: {
10907 MonoClass *handle_class;
10909 CHECK_STACK_OVF (1);
10912 n = read32 (ip + 1);
10914 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10915 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10916 handle = mono_method_get_wrapper_data (method, n);
10917 handle_class = mono_method_get_wrapper_data (method, n + 1);
10918 if (handle_class == mono_defaults.typehandle_class)
10919 handle = &((MonoClass*)handle)->byval_arg;
10922 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10926 mono_class_init (handle_class);
10927 if (cfg->generic_sharing_context) {
10928 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10929 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10930 /* This case handles ldtoken
10931 of an open type, like for
10934 } else if (handle_class == mono_defaults.typehandle_class) {
10935 /* If we get a MONO_TYPE_CLASS
10936 then we need to provide the
10938 instantiation of it. */
10939 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10942 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10943 } else if (handle_class == mono_defaults.fieldhandle_class)
10944 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10945 else if (handle_class == mono_defaults.methodhandle_class)
10946 context_used = mini_method_check_context_used (cfg, handle);
10948 g_assert_not_reached ();
10951 if ((cfg->opt & MONO_OPT_SHARED) &&
10952 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10953 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10954 MonoInst *addr, *vtvar, *iargs [3];
10955 int method_context_used;
10957 method_context_used = mini_method_check_context_used (cfg, method);
10959 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10961 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10962 EMIT_NEW_ICONST (cfg, iargs [1], n);
10963 if (method_context_used) {
10964 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10965 method, MONO_RGCTX_INFO_METHOD);
10966 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10968 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10969 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10971 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10973 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10975 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10977 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10978 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10979 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10980 (cmethod->klass == mono_defaults.systemtype_class) &&
10981 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10982 MonoClass *tclass = mono_class_from_mono_type (handle);
10984 mono_class_init (tclass);
10985 if (context_used) {
10986 ins = emit_get_rgctx_klass (cfg, context_used,
10987 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10988 } else if (cfg->compile_aot) {
10989 if (method->wrapper_type) {
10990 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10991 /* Special case for static synchronized wrappers */
10992 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10994 /* FIXME: n is not a normal token */
10996 EMIT_NEW_PCONST (cfg, ins, NULL);
10999 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11002 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11004 ins->type = STACK_OBJ;
11005 ins->klass = cmethod->klass;
11008 MonoInst *addr, *vtvar;
11010 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11012 if (context_used) {
11013 if (handle_class == mono_defaults.typehandle_class) {
11014 ins = emit_get_rgctx_klass (cfg, context_used,
11015 mono_class_from_mono_type (handle),
11016 MONO_RGCTX_INFO_TYPE);
11017 } else if (handle_class == mono_defaults.methodhandle_class) {
11018 ins = emit_get_rgctx_method (cfg, context_used,
11019 handle, MONO_RGCTX_INFO_METHOD);
11020 } else if (handle_class == mono_defaults.fieldhandle_class) {
11021 ins = emit_get_rgctx_field (cfg, context_used,
11022 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11024 g_assert_not_reached ();
11026 } else if (cfg->compile_aot) {
11027 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11029 EMIT_NEW_PCONST (cfg, ins, handle);
11031 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11032 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11033 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11043 MONO_INST_NEW (cfg, ins, OP_THROW);
11045 ins->sreg1 = sp [0]->dreg;
11047 bblock->out_of_line = TRUE;
11048 MONO_ADD_INS (bblock, ins);
11049 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11050 MONO_ADD_INS (bblock, ins);
11053 link_bblock (cfg, bblock, end_bblock);
11054 start_new_bblock = 1;
11056 case CEE_ENDFINALLY:
11057 /* mono_save_seq_point_info () depends on this */
11058 if (sp != stack_start)
11059 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11060 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11061 MONO_ADD_INS (bblock, ins);
11063 start_new_bblock = 1;
11066 * Control will leave the method so empty the stack, otherwise
11067 * the next basic block will start with a nonempty stack.
11069 while (sp != stack_start) {
11074 case CEE_LEAVE_S: {
11077 if (*ip == CEE_LEAVE) {
11079 target = ip + 5 + (gint32)read32(ip + 1);
11082 target = ip + 2 + (signed char)(ip [1]);
11085 /* empty the stack */
11086 while (sp != stack_start) {
11091 * If this leave statement is in a catch block, check for a
11092 * pending exception, and rethrow it if necessary.
11093 * We avoid doing this in runtime invoke wrappers, since those are called
11094 * by native code which excepts the wrapper to catch all exceptions.
11096 for (i = 0; i < header->num_clauses; ++i) {
11097 MonoExceptionClause *clause = &header->clauses [i];
11100 * Use <= in the final comparison to handle clauses with multiple
11101 * leave statements, like in bug #78024.
11102 * The ordering of the exception clauses guarantees that we find the
11103 * innermost clause.
11105 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11107 MonoBasicBlock *dont_throw;
11112 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11115 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11117 NEW_BBLOCK (cfg, dont_throw);
11120 * Currently, we always rethrow the abort exception, despite the
11121 * fact that this is not correct. See thread6.cs for an example.
11122 * But propagating the abort exception is more important than
11123 * getting the sematics right.
11125 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11126 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11127 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11129 MONO_START_BB (cfg, dont_throw);
11134 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11136 MonoExceptionClause *clause;
11138 for (tmp = handlers; tmp; tmp = tmp->next) {
11139 clause = tmp->data;
11140 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11142 link_bblock (cfg, bblock, tblock);
11143 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11144 ins->inst_target_bb = tblock;
11145 ins->inst_eh_block = clause;
11146 MONO_ADD_INS (bblock, ins);
11147 bblock->has_call_handler = 1;
11148 if (COMPILE_LLVM (cfg)) {
11149 MonoBasicBlock *target_bb;
11152 * Link the finally bblock with the target, since it will
11153 * conceptually branch there.
11154 * FIXME: Have to link the bblock containing the endfinally.
11156 GET_BBLOCK (cfg, target_bb, target);
11157 link_bblock (cfg, tblock, target_bb);
11160 g_list_free (handlers);
11163 MONO_INST_NEW (cfg, ins, OP_BR);
11164 MONO_ADD_INS (bblock, ins);
11165 GET_BBLOCK (cfg, tblock, target);
11166 link_bblock (cfg, bblock, tblock);
11167 ins->inst_target_bb = tblock;
11168 start_new_bblock = 1;
11170 if (*ip == CEE_LEAVE)
11179 * Mono specific opcodes
11181 case MONO_CUSTOM_PREFIX: {
11183 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11187 case CEE_MONO_ICALL: {
11189 MonoJitICallInfo *info;
11191 token = read32 (ip + 2);
11192 func = mono_method_get_wrapper_data (method, token);
11193 info = mono_find_jit_icall_by_addr (func);
11195 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11198 CHECK_STACK (info->sig->param_count);
11199 sp -= info->sig->param_count;
11201 ins = mono_emit_jit_icall (cfg, info->func, sp);
11202 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11206 inline_costs += 10 * num_calls++;
11210 case CEE_MONO_LDPTR: {
11213 CHECK_STACK_OVF (1);
11215 token = read32 (ip + 2);
11217 ptr = mono_method_get_wrapper_data (method, token);
11218 /* FIXME: Generalize this */
11219 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11220 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11225 EMIT_NEW_PCONST (cfg, ins, ptr);
11228 inline_costs += 10 * num_calls++;
11229 /* Can't embed random pointers into AOT code */
11233 case CEE_MONO_JIT_ICALL_ADDR: {
11234 MonoJitICallInfo *callinfo;
11237 CHECK_STACK_OVF (1);
11239 token = read32 (ip + 2);
11241 ptr = mono_method_get_wrapper_data (method, token);
11242 callinfo = mono_find_jit_icall_by_addr (ptr);
11243 g_assert (callinfo);
11244 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11247 inline_costs += 10 * num_calls++;
11250 case CEE_MONO_ICALL_ADDR: {
11251 MonoMethod *cmethod;
11254 CHECK_STACK_OVF (1);
11256 token = read32 (ip + 2);
11258 cmethod = mono_method_get_wrapper_data (method, token);
11260 if (cfg->compile_aot) {
11261 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11263 ptr = mono_lookup_internal_call (cmethod);
11265 EMIT_NEW_PCONST (cfg, ins, ptr);
11271 case CEE_MONO_VTADDR: {
11272 MonoInst *src_var, *src;
11278 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11279 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11284 case CEE_MONO_NEWOBJ: {
11285 MonoInst *iargs [2];
11287 CHECK_STACK_OVF (1);
11289 token = read32 (ip + 2);
11290 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11291 mono_class_init (klass);
11292 NEW_DOMAINCONST (cfg, iargs [0]);
11293 MONO_ADD_INS (cfg->cbb, iargs [0]);
11294 NEW_CLASSCONST (cfg, iargs [1], klass);
11295 MONO_ADD_INS (cfg->cbb, iargs [1]);
11296 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11298 inline_costs += 10 * num_calls++;
11301 case CEE_MONO_OBJADDR:
11304 MONO_INST_NEW (cfg, ins, OP_MOVE);
11305 ins->dreg = alloc_ireg_mp (cfg);
11306 ins->sreg1 = sp [0]->dreg;
11307 ins->type = STACK_MP;
11308 MONO_ADD_INS (cfg->cbb, ins);
11312 case CEE_MONO_LDNATIVEOBJ:
11314 * Similar to LDOBJ, but instead load the unmanaged
11315 * representation of the vtype to the stack.
11320 token = read32 (ip + 2);
11321 klass = mono_method_get_wrapper_data (method, token);
11322 g_assert (klass->valuetype);
11323 mono_class_init (klass);
11326 MonoInst *src, *dest, *temp;
11329 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11330 temp->backend.is_pinvoke = 1;
11331 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11332 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11334 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11335 dest->type = STACK_VTYPE;
11336 dest->klass = klass;
11342 case CEE_MONO_RETOBJ: {
11344 * Same as RET, but return the native representation of a vtype
11347 g_assert (cfg->ret);
11348 g_assert (mono_method_signature (method)->pinvoke);
11353 token = read32 (ip + 2);
11354 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11356 if (!cfg->vret_addr) {
11357 g_assert (cfg->ret_var_is_local);
11359 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11361 EMIT_NEW_RETLOADA (cfg, ins);
11363 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11365 if (sp != stack_start)
11368 MONO_INST_NEW (cfg, ins, OP_BR);
11369 ins->inst_target_bb = end_bblock;
11370 MONO_ADD_INS (bblock, ins);
11371 link_bblock (cfg, bblock, end_bblock);
11372 start_new_bblock = 1;
11376 case CEE_MONO_CISINST:
11377 case CEE_MONO_CCASTCLASS: {
11382 token = read32 (ip + 2);
11383 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11384 if (ip [1] == CEE_MONO_CISINST)
11385 ins = handle_cisinst (cfg, klass, sp [0]);
11387 ins = handle_ccastclass (cfg, klass, sp [0]);
11393 case CEE_MONO_SAVE_LMF:
11394 case CEE_MONO_RESTORE_LMF:
11395 #ifdef MONO_ARCH_HAVE_LMF_OPS
11396 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11397 MONO_ADD_INS (bblock, ins);
11398 cfg->need_lmf_area = TRUE;
11402 case CEE_MONO_CLASSCONST:
11403 CHECK_STACK_OVF (1);
11405 token = read32 (ip + 2);
11406 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11409 inline_costs += 10 * num_calls++;
11411 case CEE_MONO_NOT_TAKEN:
11412 bblock->out_of_line = TRUE;
11415 case CEE_MONO_TLS: {
11418 CHECK_STACK_OVF (1);
11420 key = (gint32)read32 (ip + 2);
11421 g_assert (key < TLS_KEY_NUM);
11423 ins = mono_create_tls_get (cfg, key);
11425 if (cfg->compile_aot) {
11427 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11428 ins->dreg = alloc_preg (cfg);
11429 ins->type = STACK_PTR;
11431 g_assert_not_reached ();
11434 ins->type = STACK_PTR;
11435 MONO_ADD_INS (bblock, ins);
11440 case CEE_MONO_DYN_CALL: {
11441 MonoCallInst *call;
11443 /* It would be easier to call a trampoline, but that would put an
11444 * extra frame on the stack, confusing exception handling. So
11445 * implement it inline using an opcode for now.
11448 if (!cfg->dyn_call_var) {
11449 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11450 /* prevent it from being register allocated */
11451 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11454 /* Has to use a call inst since it local regalloc expects it */
11455 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11456 ins = (MonoInst*)call;
11458 ins->sreg1 = sp [0]->dreg;
11459 ins->sreg2 = sp [1]->dreg;
11460 MONO_ADD_INS (bblock, ins);
11462 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11465 inline_costs += 10 * num_calls++;
11469 case CEE_MONO_MEMORY_BARRIER: {
11471 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11475 case CEE_MONO_JIT_ATTACH: {
11476 MonoInst *args [16];
11477 MonoInst *ad_ins, *lmf_ins;
11478 MonoBasicBlock *next_bb = NULL;
11480 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11482 EMIT_NEW_PCONST (cfg, ins, NULL);
11483 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11489 ad_ins = mono_get_domain_intrinsic (cfg);
11490 lmf_ins = mono_get_lmf_intrinsic (cfg);
11493 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11494 NEW_BBLOCK (cfg, next_bb);
11496 MONO_ADD_INS (cfg->cbb, ad_ins);
11497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11498 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11500 MONO_ADD_INS (cfg->cbb, lmf_ins);
11501 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11505 if (cfg->compile_aot) {
11506 /* AOT code is only used in the root domain */
11507 EMIT_NEW_PCONST (cfg, args [0], NULL);
11509 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11511 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11512 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11515 MONO_START_BB (cfg, next_bb);
11521 case CEE_MONO_JIT_DETACH: {
11522 MonoInst *args [16];
11524 /* Restore the original domain */
11525 dreg = alloc_ireg (cfg);
11526 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11527 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11532 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11538 case CEE_PREFIX1: {
11541 case CEE_ARGLIST: {
11542 /* somewhat similar to LDTOKEN */
11543 MonoInst *addr, *vtvar;
11544 CHECK_STACK_OVF (1);
11545 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11547 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11548 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11550 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11551 ins->type = STACK_VTYPE;
11552 ins->klass = mono_defaults.argumenthandle_class;
11565 * The following transforms:
11566 * CEE_CEQ into OP_CEQ
11567 * CEE_CGT into OP_CGT
11568 * CEE_CGT_UN into OP_CGT_UN
11569 * CEE_CLT into OP_CLT
11570 * CEE_CLT_UN into OP_CLT_UN
11572 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11574 MONO_INST_NEW (cfg, ins, cmp->opcode);
11576 cmp->sreg1 = sp [0]->dreg;
11577 cmp->sreg2 = sp [1]->dreg;
11578 type_from_op (cmp, sp [0], sp [1]);
11580 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11581 cmp->opcode = OP_LCOMPARE;
11582 else if (sp [0]->type == STACK_R8)
11583 cmp->opcode = OP_FCOMPARE;
11585 cmp->opcode = OP_ICOMPARE;
11586 MONO_ADD_INS (bblock, cmp);
11587 ins->type = STACK_I4;
11588 ins->dreg = alloc_dreg (cfg, ins->type);
11589 type_from_op (ins, sp [0], sp [1]);
11591 if (cmp->opcode == OP_FCOMPARE) {
11593 * The backends expect the fceq opcodes to do the
11596 cmp->opcode = OP_NOP;
11597 ins->sreg1 = cmp->sreg1;
11598 ins->sreg2 = cmp->sreg2;
11600 MONO_ADD_INS (bblock, ins);
11606 MonoInst *argconst;
11607 MonoMethod *cil_method;
11609 CHECK_STACK_OVF (1);
11611 n = read32 (ip + 2);
11612 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11613 if (!cmethod || mono_loader_get_last_error ())
11615 mono_class_init (cmethod->klass);
11617 mono_save_token_info (cfg, image, n, cmethod);
11619 context_used = mini_method_check_context_used (cfg, cmethod);
11621 cil_method = cmethod;
11622 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11623 METHOD_ACCESS_FAILURE;
11625 if (mono_security_cas_enabled ()) {
11626 if (check_linkdemand (cfg, method, cmethod))
11627 INLINE_FAILURE ("linkdemand");
11628 CHECK_CFG_EXCEPTION;
11629 } else if (mono_security_core_clr_enabled ()) {
11630 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11634 * Optimize the common case of ldftn+delegate creation
11636 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11637 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11638 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11639 MonoInst *target_ins;
11640 MonoMethod *invoke;
11641 int invoke_context_used;
11643 invoke = mono_get_delegate_invoke (ctor_method->klass);
11644 if (!invoke || !mono_method_signature (invoke))
11647 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11649 target_ins = sp [-1];
11651 if (mono_security_core_clr_enabled ())
11652 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11654 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11655 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11656 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11658 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11662 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11663 /* FIXME: SGEN support */
11664 if (invoke_context_used == 0) {
11666 if (cfg->verbose_level > 3)
11667 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11669 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11670 CHECK_CFG_EXCEPTION;
11679 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11680 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11684 inline_costs += 10 * num_calls++;
11687 case CEE_LDVIRTFTN: {
11688 MonoInst *args [2];
11692 n = read32 (ip + 2);
11693 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11694 if (!cmethod || mono_loader_get_last_error ())
11696 mono_class_init (cmethod->klass);
11698 context_used = mini_method_check_context_used (cfg, cmethod);
11700 if (mono_security_cas_enabled ()) {
11701 if (check_linkdemand (cfg, method, cmethod))
11702 INLINE_FAILURE ("linkdemand");
11703 CHECK_CFG_EXCEPTION;
11704 } else if (mono_security_core_clr_enabled ()) {
11705 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11711 args [1] = emit_get_rgctx_method (cfg, context_used,
11712 cmethod, MONO_RGCTX_INFO_METHOD);
11715 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11717 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11720 inline_costs += 10 * num_calls++;
11724 CHECK_STACK_OVF (1);
11726 n = read16 (ip + 2);
11728 EMIT_NEW_ARGLOAD (cfg, ins, n);
11733 CHECK_STACK_OVF (1);
11735 n = read16 (ip + 2);
11737 NEW_ARGLOADA (cfg, ins, n);
11738 MONO_ADD_INS (cfg->cbb, ins);
11746 n = read16 (ip + 2);
11748 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11750 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11754 CHECK_STACK_OVF (1);
11756 n = read16 (ip + 2);
11758 EMIT_NEW_LOCLOAD (cfg, ins, n);
11763 unsigned char *tmp_ip;
11764 CHECK_STACK_OVF (1);
11766 n = read16 (ip + 2);
11769 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11775 EMIT_NEW_LOCLOADA (cfg, ins, n);
11784 n = read16 (ip + 2);
11786 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11788 emit_stloc_ir (cfg, sp, header, n);
11795 if (sp != stack_start)
11797 if (cfg->method != method)
11799 * Inlining this into a loop in a parent could lead to
11800 * stack overflows which is different behavior than the
11801 * non-inlined case, thus disable inlining in this case.
11803 goto inline_failure;
11805 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11806 ins->dreg = alloc_preg (cfg);
11807 ins->sreg1 = sp [0]->dreg;
11808 ins->type = STACK_PTR;
11809 MONO_ADD_INS (cfg->cbb, ins);
11811 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11813 ins->flags |= MONO_INST_INIT;
11818 case CEE_ENDFILTER: {
11819 MonoExceptionClause *clause, *nearest;
11820 int cc, nearest_num;
11824 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11826 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11827 ins->sreg1 = (*sp)->dreg;
11828 MONO_ADD_INS (bblock, ins);
11829 start_new_bblock = 1;
11834 for (cc = 0; cc < header->num_clauses; ++cc) {
11835 clause = &header->clauses [cc];
11836 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11837 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11838 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11843 g_assert (nearest);
11844 if ((ip - header->code) != nearest->handler_offset)
11849 case CEE_UNALIGNED_:
11850 ins_flag |= MONO_INST_UNALIGNED;
11851 /* FIXME: record alignment? we can assume 1 for now */
11855 case CEE_VOLATILE_:
11856 ins_flag |= MONO_INST_VOLATILE;
11860 ins_flag |= MONO_INST_TAILCALL;
11861 cfg->flags |= MONO_CFG_HAS_TAIL;
11862 /* Can't inline tail calls at this time */
11863 inline_costs += 100000;
11870 token = read32 (ip + 2);
11871 klass = mini_get_class (method, token, generic_context);
11872 CHECK_TYPELOAD (klass);
11873 if (generic_class_is_reference_type (cfg, klass))
11874 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11876 mini_emit_initobj (cfg, *sp, NULL, klass);
11880 case CEE_CONSTRAINED_:
11882 token = read32 (ip + 2);
11883 constrained_call = mini_get_class (method, token, generic_context);
11884 CHECK_TYPELOAD (constrained_call);
11888 case CEE_INITBLK: {
11889 MonoInst *iargs [3];
11893 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11894 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11895 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11896 /* emit_memset only works when val == 0 */
11897 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11899 iargs [0] = sp [0];
11900 iargs [1] = sp [1];
11901 iargs [2] = sp [2];
11902 if (ip [1] == CEE_CPBLK) {
11903 MonoMethod *memcpy_method = get_memcpy_method ();
11904 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11906 MonoMethod *memset_method = get_memset_method ();
11907 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11917 ins_flag |= MONO_INST_NOTYPECHECK;
11919 ins_flag |= MONO_INST_NORANGECHECK;
11920 /* we ignore the no-nullcheck for now since we
11921 * really do it explicitly only when doing callvirt->call
11925 case CEE_RETHROW: {
11927 int handler_offset = -1;
11929 for (i = 0; i < header->num_clauses; ++i) {
11930 MonoExceptionClause *clause = &header->clauses [i];
11931 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11932 handler_offset = clause->handler_offset;
11937 bblock->flags |= BB_EXCEPTION_UNSAFE;
11939 g_assert (handler_offset != -1);
11941 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11942 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11943 ins->sreg1 = load->dreg;
11944 MONO_ADD_INS (bblock, ins);
11946 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11947 MONO_ADD_INS (bblock, ins);
11950 link_bblock (cfg, bblock, end_bblock);
11951 start_new_bblock = 1;
11959 GSHAREDVT_FAILURE (*ip);
11961 CHECK_STACK_OVF (1);
11963 token = read32 (ip + 2);
11964 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11965 MonoType *type = mono_type_create_from_typespec (image, token);
11966 val = mono_type_size (type, &ialign);
11968 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11969 CHECK_TYPELOAD (klass);
11970 mono_class_init (klass);
11971 val = mono_type_size (&klass->byval_arg, &ialign);
11973 EMIT_NEW_ICONST (cfg, ins, val);
11978 case CEE_REFANYTYPE: {
11979 MonoInst *src_var, *src;
11981 GSHAREDVT_FAILURE (*ip);
11987 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11989 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11990 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11991 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11996 case CEE_READONLY_:
12009 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12019 g_warning ("opcode 0x%02x not handled", *ip);
12023 if (start_new_bblock != 1)
12026 bblock->cil_length = ip - bblock->cil_code;
12027 if (bblock->next_bb) {
12028 /* This could already be set because of inlining, #693905 */
12029 MonoBasicBlock *bb = bblock;
12031 while (bb->next_bb)
12033 bb->next_bb = end_bblock;
12035 bblock->next_bb = end_bblock;
12038 if (cfg->method == method && cfg->domainvar) {
12040 MonoInst *get_domain;
12042 cfg->cbb = init_localsbb;
12044 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12045 MONO_ADD_INS (cfg->cbb, get_domain);
12047 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12049 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12050 MONO_ADD_INS (cfg->cbb, store);
12053 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12054 if (cfg->compile_aot)
12055 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12056 mono_get_got_var (cfg);
12059 if (cfg->method == method && cfg->got_var)
12060 mono_emit_load_got_addr (cfg);
12062 if (init_localsbb) {
12063 cfg->cbb = init_localsbb;
12065 for (i = 0; i < header->num_locals; ++i) {
12066 emit_init_local (cfg, i, header->locals [i], init_locals);
12070 if (cfg->init_ref_vars && cfg->method == method) {
12071 /* Emit initialization for ref vars */
12072 // FIXME: Avoid duplication initialization for IL locals.
12073 for (i = 0; i < cfg->num_varinfo; ++i) {
12074 MonoInst *ins = cfg->varinfo [i];
12076 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12077 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12081 if (cfg->lmf_var && cfg->method == method) {
12082 cfg->cbb = init_localsbb;
12083 emit_push_lmf (cfg);
12087 MonoBasicBlock *bb;
12090 * Make seq points at backward branch targets interruptable.
12092 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12093 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12094 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12097 /* Add a sequence point for method entry/exit events */
12099 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12100 MONO_ADD_INS (init_localsbb, ins);
12101 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12102 MONO_ADD_INS (cfg->bb_exit, ins);
12106 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12107 * the code they refer to was dead (#11880).
12109 if (sym_seq_points) {
12110 for (i = 0; i < header->code_size; ++i) {
12111 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12114 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12115 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12122 if (cfg->method == method) {
12123 MonoBasicBlock *bb;
12124 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12125 bb->region = mono_find_block_region (cfg, bb->real_offset);
12127 mono_create_spvar_for_region (cfg, bb->region);
12128 if (cfg->verbose_level > 2)
12129 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12133 g_slist_free (class_inits);
12134 dont_inline = g_list_remove (dont_inline, method);
12136 if (inline_costs < 0) {
12139 /* Method is too large */
12140 mname = mono_method_full_name (method, TRUE);
12141 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12142 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12144 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12145 mono_basic_block_free (original_bb);
12149 if ((cfg->verbose_level > 2) && (cfg->method == method))
12150 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12152 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12153 mono_basic_block_free (original_bb);
12154 return inline_costs;
12157 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12164 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12168 set_exception_type_from_invalid_il (cfg, method, ip);
12172 g_slist_free (class_inits);
12173 mono_basic_block_free (original_bb);
12174 dont_inline = g_list_remove (dont_inline, method);
12175 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source membase store opcode to its immediate-source
 * counterpart, used when the value being stored is known to be a constant.
 * Aborts on opcodes which have no immediate form (e.g. FP stores).
 * NOTE(review): this excerpt is sampled; the switch header/default label are
 * not visible here.
 */
12180 store_membase_reg_to_store_membase_imm (int opcode)
12183 case OP_STORE_MEMBASE_REG:
12184 return OP_STORE_MEMBASE_IMM;
12185 case OP_STOREI1_MEMBASE_REG:
12186 return OP_STOREI1_MEMBASE_IMM;
12187 case OP_STOREI2_MEMBASE_REG:
12188 return OP_STOREI2_MEMBASE_IMM;
12189 case OP_STOREI4_MEMBASE_REG:
12190 return OP_STOREI4_MEMBASE_IMM;
12191 case OP_STOREI8_MEMBASE_REG:
12192 return OP_STOREI8_MEMBASE_IMM;
/* any other store opcode has no immediate variant */
12194 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-source-register opcode to the variant which takes its second
 * operand as an immediate, enabling constant folding into the instruction.
 * NOTE(review): this excerpt is sampled; most case labels and the default
 * path are not visible here.
 */
12201 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
12205 return OP_IADD_IMM;
12207 return OP_ISUB_IMM;
12209 return OP_IDIV_IMM;
12211 return OP_IDIV_UN_IMM;
12213 return OP_IREM_IMM;
12215 return OP_IREM_UN_IMM;
12217 return OP_IMUL_IMM;
12219 return OP_IAND_IMM;
12223 return OP_IXOR_IMM;
12225 return OP_ISHL_IMM;
12227 return OP_ISHR_IMM;
12229 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU ops */
12232 return OP_LADD_IMM;
12234 return OP_LSUB_IMM;
12236 return OP_LAND_IMM;
12240 return OP_LXOR_IMM;
12242 return OP_LSHL_IMM;
12244 return OP_LSHR_IMM;
12246 return OP_LSHR_UN_IMM;
/* compares */
12249 return OP_COMPARE_IMM;
12251 return OP_ICOMPARE_IMM;
12253 return OP_LCOMPARE_IMM;
/* membase stores: constant value folded into the store */
12255 case OP_STORE_MEMBASE_REG:
12256 return OP_STORE_MEMBASE_IMM;
12257 case OP_STOREI1_MEMBASE_REG:
12258 return OP_STOREI1_MEMBASE_IMM;
12259 case OP_STOREI2_MEMBASE_REG:
12260 return OP_STOREI2_MEMBASE_IMM;
12261 case OP_STOREI4_MEMBASE_REG:
12262 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific memory-operand forms */
12264 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12266 return OP_X86_PUSH_IMM;
12267 case OP_X86_COMPARE_MEMBASE_REG:
12268 return OP_X86_COMPARE_MEMBASE_IMM;
12270 #if defined(TARGET_AMD64)
12271 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12272 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* calls through a register whose address is a constant */
12274 case OP_VOIDCALL_REG:
12275 return OP_VOIDCALL;
12283 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding typed
 * OP_LOAD*_MEMBASE IR opcode. Aborts on anything that is not an ldind.
 * NOTE(review): this excerpt is sampled; most CEE_LDIND_* case labels are not
 * visible here.
 */
12290 ldind_to_load_membase (int opcode)
12294 return OP_LOADI1_MEMBASE;
12296 return OP_LOADU1_MEMBASE;
12298 return OP_LOADI2_MEMBASE;
12300 return OP_LOADU2_MEMBASE;
12302 return OP_LOADI4_MEMBASE;
12304 return OP_LOADU4_MEMBASE;
12306 return OP_LOAD_MEMBASE;
/* object references load as a full pointer-sized word */
12307 case CEE_LDIND_REF:
12308 return OP_LOAD_MEMBASE;
12310 return OP_LOADI8_MEMBASE;
12312 return OP_LOADR4_MEMBASE;
12314 return OP_LOADR8_MEMBASE;
12316 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding typed
 * OP_STORE*_MEMBASE_REG IR opcode. Aborts on anything that is not an stind.
 * NOTE(review): this excerpt is sampled; most CEE_STIND_* case labels are not
 * visible here.
 */
12323 stind_to_store_membase (int opcode)
12327 return OP_STOREI1_MEMBASE_REG;
12329 return OP_STOREI2_MEMBASE_REG;
12331 return OP_STOREI4_MEMBASE_REG;
/* object references store as a full pointer-sized word */
12333 case CEE_STIND_REF:
12334 return OP_STORE_MEMBASE_REG;
12336 return OP_STOREI8_MEMBASE_REG;
12338 return OP_STORER4_MEMBASE_REG;
12340 return OP_STORER8_MEMBASE_REG;
12342 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (*_MEMBASE) to the absolute-address variant
 * (*_MEM), for when the effective address is a known constant. Only the
 * x86/amd64 backends implement the *_MEM opcodes, hence the #if guard.
 */
12349 mono_load_membase_to_load_mem (int opcode)
12351 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12352 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12354 case OP_LOAD_MEMBASE:
12355 return OP_LOAD_MEM;
12356 case OP_LOADU1_MEMBASE:
12357 return OP_LOADU1_MEM;
12358 case OP_LOADU2_MEMBASE:
12359 return OP_LOADU2_MEM;
12360 case OP_LOADI4_MEMBASE:
12361 return OP_LOADI4_MEM;
12362 case OP_LOADU4_MEMBASE:
12363 return OP_LOADU4_MEM;
/* 8 byte absolute loads only exist on 64 bit targets */
12364 #if SIZEOF_REGISTER == 8
12365 case OP_LOADI8_MEMBASE:
12366 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Fold an ALU op whose result feeds STORE_OPCODE into a single
 * read-modify-write memory-destination opcode (x86/amd64 only, where the
 * ISA supports ALU ops with a memory destination). The fold is only legal
 * for full-word (and on amd64, 8 byte) stores, hence the store_opcode
 * filters. NOTE(review): this excerpt is sampled; the case labels pairing
 * each return are not visible here.
 */
12375 op_to_op_dest_membase (int store_opcode, int opcode)
12377 #if defined(TARGET_X86)
12378 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source ALU-to-memory forms */
12383 return OP_X86_ADD_MEMBASE_REG;
12385 return OP_X86_SUB_MEMBASE_REG;
12387 return OP_X86_AND_MEMBASE_REG;
12389 return OP_X86_OR_MEMBASE_REG;
12391 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source ALU-to-memory forms */
12394 return OP_X86_ADD_MEMBASE_IMM;
12397 return OP_X86_SUB_MEMBASE_IMM;
12400 return OP_X86_AND_MEMBASE_IMM;
12403 return OP_X86_OR_MEMBASE_IMM;
12406 return OP_X86_XOR_MEMBASE_IMM;
12412 #if defined(TARGET_AMD64)
12413 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit forms share the OP_X86_* opcodes */
12418 return OP_X86_ADD_MEMBASE_REG;
12420 return OP_X86_SUB_MEMBASE_REG;
12422 return OP_X86_AND_MEMBASE_REG;
12424 return OP_X86_OR_MEMBASE_REG;
12426 return OP_X86_XOR_MEMBASE_REG;
12428 return OP_X86_ADD_MEMBASE_IMM;
12430 return OP_X86_SUB_MEMBASE_IMM;
12432 return OP_X86_AND_MEMBASE_IMM;
12434 return OP_X86_OR_MEMBASE_IMM;
12436 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit forms use the OP_AMD64_* opcodes */
12438 return OP_AMD64_ADD_MEMBASE_REG;
12440 return OP_AMD64_SUB_MEMBASE_REG;
12442 return OP_AMD64_AND_MEMBASE_REG;
12444 return OP_AMD64_OR_MEMBASE_REG;
12446 return OP_AMD64_XOR_MEMBASE_REG;
12449 return OP_AMD64_ADD_MEMBASE_IMM;
12452 return OP_AMD64_SUB_MEMBASE_IMM;
12455 return OP_AMD64_AND_MEMBASE_IMM;
12458 return OP_AMD64_OR_MEMBASE_IMM;
12461 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode whose boolean result is immediately stored
 * through a 1 byte store into a single x86 SETcc-to-memory opcode.
 * NOTE(review): this excerpt is sampled; the case labels (presumably the
 * OP_CEQ/OP_CNE family) selecting each branch are not visible here — confirm
 * against the full source.
 */
12471 op_to_op_store_membase (int store_opcode, int opcode)
12473 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12476 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12477 return OP_X86_SETEQ_MEMBASE;
12479 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12480 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load (LOAD_OPCODE) feeding the FIRST source of OPCODE into a
 * memory-operand form of the op, eliminating the intermediate register
 * (x86/amd64 only). Size filters on load_opcode keep the fold legal.
 * NOTE(review): this excerpt is sampled; several case labels/#else branches
 * are not visible here.
 */
12488 op_to_op_src1_membase (int load_opcode, int opcode)
12491 /* FIXME: This has sign extension issues */
12493 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12494 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only full-word loads may be folded below */
12497 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12502 return OP_X86_PUSH_MEMBASE;
12503 case OP_COMPARE_IMM:
12504 case OP_ICOMPARE_IMM:
12505 return OP_X86_COMPARE_MEMBASE_IMM;
12508 return OP_X86_COMPARE_MEMBASE_REG;
12512 #ifdef TARGET_AMD64
12513 /* FIXME: This has sign extension issues */
12515 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12516 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32) OP_LOAD_MEMBASE is 4 bytes, OP_LOADI8_MEMBASE 8 */
12521 #ifdef __mono_ilp32__
12522 if (load_opcode == OP_LOADI8_MEMBASE)
12524 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12526 return OP_X86_PUSH_MEMBASE;
12528 /* FIXME: This only works for 32 bit immediates
12529 case OP_COMPARE_IMM:
12530 case OP_LCOMPARE_IMM:
12531 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12532 return OP_AMD64_COMPARE_MEMBASE_IMM;
12534 case OP_ICOMPARE_IMM:
12535 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12536 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12540 #ifdef __mono_ilp32__
12541 if (load_opcode == OP_LOAD_MEMBASE)
12542 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12543 if (load_opcode == OP_LOADI8_MEMBASE)
12545 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12547 return OP_AMD64_COMPARE_MEMBASE_REG;
12550 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12551 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load (LOAD_OPCODE) feeding the SECOND source of OPCODE into a
 * reg-memory form of the op (x86/amd64 only). Mirrors op_to_op_src1_membase
 * but for the reg,mem operand order.
 * NOTE(review): this excerpt is sampled; several case labels/#else branches
 * are not visible here.
 */
12560 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only full-word loads may be folded */
12563 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12569 return OP_X86_COMPARE_REG_MEMBASE;
12571 return OP_X86_ADD_REG_MEMBASE;
12573 return OP_X86_SUB_REG_MEMBASE;
12575 return OP_X86_AND_REG_MEMBASE;
12577 return OP_X86_OR_REG_MEMBASE;
12579 return OP_X86_XOR_REG_MEMBASE;
12583 #ifdef TARGET_AMD64
/* under ILP32 (x32) OP_LOAD_MEMBASE is only 4 bytes wide */
12584 #ifdef __mono_ilp32__
12585 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12587 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit operand forms */
12591 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12593 return OP_X86_ADD_REG_MEMBASE;
12595 return OP_X86_SUB_REG_MEMBASE;
12597 return OP_X86_AND_REG_MEMBASE;
12599 return OP_X86_OR_REG_MEMBASE;
12601 return OP_X86_XOR_REG_MEMBASE;
12603 #ifdef __mono_ilp32__
12604 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12606 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit operand forms */
12611 return OP_AMD64_COMPARE_REG_MEMBASE;
12613 return OP_AMD64_ADD_REG_MEMBASE;
12615 return OP_AMD64_SUB_REG_MEMBASE;
12617 return OP_AMD64_AND_REG_MEMBASE;
12619 return OP_AMD64_OR_REG_MEMBASE;
12621 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes which
 * the current architecture emulates in software (long shifts on 32 bit
 * targets, mul/div where MONO_ARCH_EMULATE_* is defined), since the
 * emulation helpers take register operands. NOTE(review): this excerpt is
 * sampled; the case labels inside each #if block are not visible here.
 */
12630 mono_op_to_op_imm_noemul (int opcode)
12633 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12639 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12646 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* safe to use the immediate form */
12651 return mono_op_to_op_imm (opcode);
12656 * mono_handle_global_vregs:
12658 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Make vregs used in more than one bblock 'global', i.e. allocate a MonoInst
 * variable for them. Two passes:
 *   1) walk every instruction recording, per vreg, the single bblock it is
 *      used in (block_num + 1, since 0 is a valid block num) or -1 once it is
 *      seen in a second bblock, creating a variable at that point;
 *   2) demote variables used in only one bblock (and not otherwise pinned:
 *      arguments, cfg->ret, volatile/indirect, R4, gsharedvt/lmf vars) back
 *      to local vregs by flagging them MONO_INST_IS_DEAD, then compress the
 *      varinfo/vars tables so liveness computation is faster.
 */
12662 mono_handle_global_vregs (MonoCompile *cfg)
12664 gint32 *vreg_to_bb;
12665 MonoBasicBlock *bb;
/* FIX: was sizeof (gint32*) * cfg->next_vreg + 1 — the element type is
 * gint32, not a pointer, and the '+ 1' belongs inside the element count */
12668 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
12670 #ifdef MONO_ARCH_SIMD_INTRINSICS
12671 if (cfg->uses_simd_intrinsics)
12672 mono_simd_simplify_indirection (cfg);
12675 /* Find local vregs used in more than one bb */
12676 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12677 MonoInst *ins = bb->code;
12678 int block_num = bb->block_num;
12680 if (cfg->verbose_level > 2)
12681 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12684 for (; ins; ins = ins->next) {
12685 const char *spec = INS_INFO (ins->opcode);
12686 int regtype = 0, regindex;
12689 if (G_UNLIKELY (cfg->verbose_level > 2))
12690 mono_print_ins (ins);
/* all CIL opcodes must already have been lowered to machine IR */
12692 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn */
12694 for (regindex = 0; regindex < 4; regindex ++) {
12697 if (regindex == 0) {
12698 regtype = spec [MONO_INST_DEST];
12699 if (regtype == ' ')
12702 } else if (regindex == 1) {
12703 regtype = spec [MONO_INST_SRC1];
12704 if (regtype == ' ')
12707 } else if (regindex == 2) {
12708 regtype = spec [MONO_INST_SRC2];
12709 if (regtype == ' ')
12712 } else if (regindex == 3) {
12713 regtype = spec [MONO_INST_SRC3];
12714 if (regtype == ' ')
12719 #if SIZEOF_REGISTER == 4
12720 /* In the LLVM case, the long opcodes are not decomposed */
12721 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12723 * Since some instructions reference the original long vreg,
12724 * and some reference the two component vregs, it is quite hard
12725 * to determine when it needs to be global. So be conservative.
12727 if (!get_vreg_to_inst (cfg, vreg)) {
12728 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12730 if (cfg->verbose_level > 2)
12731 printf ("LONG VREG R%d made global.\n", vreg);
12735 * Make the component vregs volatile since the optimizations can
12736 * get confused otherwise.
12738 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12739 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12743 g_assert (vreg != -1);
12745 prev_bb = vreg_to_bb [vreg];
12746 if (prev_bb == 0) {
12747 /* 0 is a valid block num */
12748 vreg_to_bb [vreg] = block_num + 1;
12749 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are implicitly global; skip them */
12750 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12753 if (!get_vreg_to_inst (cfg, vreg)) {
12754 if (G_UNLIKELY (cfg->verbose_level > 2))
12755 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick the variable's type from the reg class */
12759 if (vreg_is_ref (cfg, vreg))
12760 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12762 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12765 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12768 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12771 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12774 g_assert_not_reached ();
12778 /* Flag as having been used in more than one bb */
12779 vreg_to_bb [vreg] = -1;
12785 /* If a variable is used in only one bblock, convert it into a local vreg */
12786 for (i = 0; i < cfg->num_varinfo; i++) {
12787 MonoInst *var = cfg->varinfo [i];
12788 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12790 switch (var->type) {
12796 #if SIZEOF_REGISTER == 8
12799 #if !defined(TARGET_X86)
12800 /* Enabling this screws up the fp stack on x86 */
12803 if (mono_arch_is_soft_float ())
12806 /* Arguments are implicitly global */
12807 /* Putting R4 vars into registers doesn't work currently */
12808 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12809 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12811 * Make that the variable's liveness interval doesn't contain a call, since
12812 * that would cause the lvreg to be spilled, making the whole optimization
12815 /* This is too slow for JIT compilation */
12817 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12819 int def_index, call_index, ins_index;
12820 gboolean spilled = FALSE;
12825 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12826 const char *spec = INS_INFO (ins->opcode);
12828 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12829 def_index = ins_index;
/* FIX: the second clause duplicated the SRC1/sreg1 test; a use through
 * the second source register must be checked via SRC2/sreg2 */
12831 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12832 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
12833 if (call_index > def_index) {
12839 if (MONO_IS_CALL (ins))
12840 call_index = ins_index;
12850 if (G_UNLIKELY (cfg->verbose_level > 2))
12851 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* the variable object stays around but is ignored from here on */
12852 var->flags |= MONO_INST_IS_DEAD;
12853 cfg->vreg_to_inst [var->dreg] = NULL;
12860 * Compress the varinfo and vars tables so the liveness computation is faster and
12861 * takes up less space.
12864 for (i = 0; i < cfg->num_varinfo; ++i) {
12865 MonoInst *var = cfg->varinfo [i];
12866 if (pos < i && cfg->locals_start == i)
12867 cfg->locals_start = pos;
12868 if (!(var->flags & MONO_INST_IS_DEAD)) {
12870 cfg->varinfo [pos] = cfg->varinfo [i];
12871 cfg->varinfo [pos]->inst_c0 = pos;
12872 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12873 cfg->vars [pos].idx = pos;
12874 #if SIZEOF_REGISTER == 4
12875 if (cfg->varinfo [pos]->type == STACK_I8) {
12876 /* Modify the two component vars too */
12879 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12880 var1->inst_c0 = pos;
12881 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12882 var1->inst_c0 = pos;
12889 cfg->num_varinfo = pos;
12890 if (cfg->locals_start > cfg->num_varinfo)
12891 cfg->locals_start = cfg->num_varinfo;
12895 * mono_spill_global_vars:
12897 * Generate spill code for variables which are not allocated to registers,
12898 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12899 * code is generated which could be optimized by the local optimization passes.
 * Also decomposes OP_LDADDR and rewrites gsharedvt variable accesses, since
 * variable frame offsets are only known at this point.
12902 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12904 MonoBasicBlock *bb;
12906 int orig_next_vreg;
12907 guint32 *vreg_to_lvreg;
/* NOTE(review): lvregs/lvregs_len declarations and the lvregs_len = 0 reset are in elided lines — confirm lvregs_len is zeroed before the bb walk below. */
12909 guint32 i, lvregs_len;
12910 gboolean dest_has_lvreg = FALSE;
12911 guint32 stacktypes [128];
12912 MonoInst **live_range_start, **live_range_end;
12913 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12914 int *gsharedvt_vreg_to_idx = NULL;
12916 *need_local_opts = FALSE;
12918 memset (spec2, 0, sizeof (spec2));
/* Map INS_INFO spec characters to stack types, used when allocating fresh lvregs. */
12920 /* FIXME: Move this function to mini.c */
12921 stacktypes ['i'] = STACK_PTR;
12922 stacktypes ['l'] = STACK_I8;
12923 stacktypes ['f'] = STACK_R8;
12924 #ifdef MONO_ARCH_SIMD_INTRINSICS
12925 stacktypes ['x'] = STACK_VTYPE;
/* On 32-bit targets a 64-bit vreg is backed by two component vregs (dreg+1 = LS word, dreg+2 = MS word); give each a REGOFFSET slot inside the parent's slot. */
12928 #if SIZEOF_REGISTER == 4
12929 /* Create MonoInsts for longs */
12930 for (i = 0; i < cfg->num_varinfo; i++) {
12931 MonoInst *ins = cfg->varinfo [i];
12933 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12934 switch (ins->type) {
12939 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12942 g_assert (ins->opcode == OP_REGOFFSET);
12944 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12946 tree->opcode = OP_REGOFFSET;
12947 tree->inst_basereg = ins->inst_basereg;
12948 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12950 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12952 tree->opcode = OP_REGOFFSET;
12953 tree->inst_basereg = ins->inst_basereg;
12954 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12964 if (cfg->compute_gc_maps) {
12965 /* registers need liveness info even for !non refs */
12966 for (i = 0; i < cfg->num_varinfo; i++) {
12967 MonoInst *ins = cfg->varinfo [i];
12969 if (ins->opcode == OP_REGVAR)
12970 ins->flags |= MONO_INST_GC_TRACK;
/* gsharedvt: locals of variable size get OP_GSHAREDVT_LOCAL plus an info-table slot index (stored +1 so 0 means "none"); by-ref arguments are marked with -1. */
12974 if (cfg->gsharedvt) {
12975 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
12977 for (i = 0; i < cfg->num_varinfo; ++i) {
12978 MonoInst *ins = cfg->varinfo [i];
12981 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12982 if (i >= cfg->locals_start) {
12984 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12985 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12986 ins->opcode = OP_GSHAREDVT_LOCAL;
12987 ins->inst_imm = idx;
12990 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12991 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12997 /* FIXME: widening and truncation */
13000 * As an optimization, when a variable allocated to the stack is first loaded into
13001 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13002 * the variable again.
13004 orig_next_vreg = cfg->next_vreg;
13005 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13006 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13010 * These arrays contain the first and last instructions accessing a given
13012 * Since we emit bblocks in the same order we process them here, and we
13013 * don't split live ranges, these will precisely describe the live range of
13014 * the variable, i.e. the instruction range where a valid value can be found
13015 * in the variables location.
13016 * The live range is computed using the liveness info computed by the liveness pass.
13017 * We can't use vmv->range, since that is an abstract live range, and we need
13018 * one which is instruction precise.
13019 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13021 /* FIXME: Only do this if debugging info is requested */
13022 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13023 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13024 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13025 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13027 /* Add spill loads/stores */
13028 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13031 if (cfg->verbose_level > 2)
13032 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are only valid within one bblock. */
13034 /* Clear vreg_to_lvreg array */
13035 for (i = 0; i < lvregs_len; i++)
13036 vreg_to_lvreg [lvregs [i]] = 0;
13040 MONO_BB_FOR_EACH_INS (bb, ins) {
13041 const char *spec = INS_INFO (ins->opcode);
13042 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13043 gboolean store, no_lvreg;
13044 int sregs [MONO_MAX_SRC_REGS];
13046 if (G_UNLIKELY (cfg->verbose_level > 2))
13047 mono_print_ins (ins);
13049 if (ins->opcode == OP_NOP)
13053 * We handle LDADDR here as well, since it can only be decomposed
13054 * when variable addresses are known.
13056 if (ins->opcode == OP_LDADDR) {
13057 MonoInst *var = ins->inst_p0;
13059 if (var->opcode == OP_VTARG_ADDR) {
13060 /* Happens on SPARC/S390 where vtypes are passed by reference */
13061 MonoInst *vtaddr = var->inst_left;
13062 if (vtaddr->opcode == OP_REGVAR) {
13063 ins->opcode = OP_MOVE;
13064 ins->sreg1 = vtaddr->dreg;
13066 else if (var->inst_left->opcode == OP_REGOFFSET) {
13067 ins->opcode = OP_LOAD_MEMBASE;
13068 ins->inst_basereg = vtaddr->inst_basereg;
13069 ins->inst_offset = vtaddr->inst_offset;
13072 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13073 /* gsharedvt arg passed by ref */
13074 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13076 ins->opcode = OP_LOAD_MEMBASE;
13077 ins->inst_basereg = var->inst_basereg;
13078 ins->inst_offset = var->inst_offset;
13079 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13080 MonoInst *load, *load2, *load3;
13081 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13082 int reg1, reg2, reg3;
13083 MonoInst *info_var = cfg->gsharedvt_info_var;
13084 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13088 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13091 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13093 g_assert (info_var);
13094 g_assert (locals_var);
13096 /* Mark the instruction used to compute the locals var as used */
13097 cfg->gsharedvt_locals_var_ins = NULL;
13099 /* Load the offset */
13100 if (info_var->opcode == OP_REGOFFSET) {
13101 reg1 = alloc_ireg (cfg);
13102 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13103 } else if (info_var->opcode == OP_REGVAR) {
13105 reg1 = info_var->dreg;
13107 g_assert_not_reached ();
13109 reg2 = alloc_ireg (cfg);
13110 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13111 /* Load the locals area address */
13112 reg3 = alloc_ireg (cfg);
13113 if (locals_var->opcode == OP_REGOFFSET) {
13114 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13115 } else if (locals_var->opcode == OP_REGVAR) {
13116 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13118 g_assert_not_reached ();
13120 /* Compute the address */
13121 ins->opcode = OP_PADD;
/* The loads must execute before ins, in reverse dependency order: load -> load2 -> load3 -> ins. */
13125 mono_bblock_insert_before_ins (bb, ins, load3);
13126 mono_bblock_insert_before_ins (bb, load3, load2);
13128 mono_bblock_insert_before_ins (bb, load2, load);
13130 g_assert (var->opcode == OP_REGOFFSET);
13132 ins->opcode = OP_ADD_IMM;
13133 ins->sreg1 = var->inst_basereg;
13134 ins->inst_imm = var->inst_offset;
13137 *need_local_opts = TRUE;
13138 spec = INS_INFO (ins->opcode);
/* Past this point only low-level (non-CEE) opcodes may remain. */
13141 if (ins->opcode < MONO_CEE_LAST) {
13142 mono_print_ins (ins);
13143 g_assert_not_reached ();
13147 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13151 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg<->sreg2 and synthesize a spec (spec2) that treats the base register as a source, so the generic dreg/sreg code below works. Undone further down. */
13152 tmp_reg = ins->dreg;
13153 ins->dreg = ins->sreg2;
13154 ins->sreg2 = tmp_reg;
13157 spec2 [MONO_INST_DEST] = ' ';
13158 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13159 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13160 spec2 [MONO_INST_SRC3] = ' ';
13162 } else if (MONO_IS_STORE_MEMINDEX (ins))
13163 g_assert_not_reached ();
13168 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13169 printf ("\t %.3s %d", spec, ins->dreg);
13170 num_sregs = mono_inst_get_src_registers (ins, sregs);
13171 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13172 printf (" %d", sregs [srcindex]);
/* DREG: if the destination is a global vreg, either rename it to its hreg (OP_REGVAR) or redirect it to a fresh lvreg and emit a spill store after the instruction. */
13179 regtype = spec [MONO_INST_DEST];
13180 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13183 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13184 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13185 MonoInst *store_ins;
13187 MonoInst *def_ins = ins;
13188 int dreg = ins->dreg; /* The original vreg */
13190 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13192 if (var->opcode == OP_REGVAR) {
13193 ins->dreg = var->dreg;
13194 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13196 * Instead of emitting a load+store, use a _membase opcode.
13198 g_assert (var->opcode == OP_REGOFFSET);
13199 if (ins->opcode == OP_MOVE) {
13203 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13204 ins->inst_basereg = var->inst_basereg;
13205 ins->inst_offset = var->inst_offset;
13208 spec = INS_INFO (ins->opcode);
13212 g_assert (var->opcode == OP_REGOFFSET);
13214 prev_dreg = ins->dreg;
13216 /* Invalidate any previous lvreg for this vreg */
13217 vreg_to_lvreg [ins->dreg] = 0;
/* Soft-float: R8 values live in integer register pairs, so spill as I8. */
13221 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13223 store_opcode = OP_STOREI8_MEMBASE_REG;
13226 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13228 #if SIZEOF_REGISTER != 8
13229 if (regtype == 'l') {
13230 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13231 mono_bblock_insert_after_ins (bb, ins, store_ins);
13232 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13233 mono_bblock_insert_after_ins (bb, ins, store_ins);
13234 def_ins = store_ins;
13239 g_assert (store_opcode != OP_STOREV_MEMBASE);
13241 /* Try to fuse the store into the instruction itself */
13242 /* FIXME: Add more instructions */
13243 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13244 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13245 ins->inst_imm = ins->inst_c0;
13246 ins->inst_destbasereg = var->inst_basereg;
13247 ins->inst_offset = var->inst_offset;
13248 spec = INS_INFO (ins->opcode);
13249 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13250 ins->opcode = store_opcode;
13251 ins->inst_destbasereg = var->inst_basereg;
13252 ins->inst_offset = var->inst_offset;
/* ins became a store itself, so re-apply the dreg/sreg2 swap + spec2 trick from above. */
13256 tmp_reg = ins->dreg;
13257 ins->dreg = ins->sreg2;
13258 ins->sreg2 = tmp_reg;
13261 spec2 [MONO_INST_DEST] = ' ';
13262 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13263 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13264 spec2 [MONO_INST_SRC3] = ' ';
13266 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13267 // FIXME: The backends expect the base reg to be in inst_basereg
13268 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13270 ins->inst_basereg = var->inst_basereg;
13271 ins->inst_offset = var->inst_offset;
13272 spec = INS_INFO (ins->opcode);
13274 /* printf ("INS: "); mono_print_ins (ins); */
13275 /* Create a store instruction */
13276 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13278 /* Insert it after the instruction */
13279 mono_bblock_insert_after_ins (bb, ins, store_ins);
13281 def_ins = store_ins;
13284 * We can't assign ins->dreg to var->dreg here, since the
13285 * sregs could use it. So set a flag, and do it after
13288 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13289 dest_has_lvreg = TRUE;
13294 if (def_ins && !live_range_start [dreg]) {
13295 live_range_start [dreg] = def_ins;
13296 live_range_start_bb [dreg] = bb;
13299 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13302 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13303 tmp->inst_c1 = dreg;
13304 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* SREGS: for each source that is a global vreg, rename to its hreg or emit a reload before the instruction, reusing a cached lvreg when one is valid. */
13311 num_sregs = mono_inst_get_src_registers (ins, sregs);
13312 for (srcindex = 0; srcindex < 3; ++srcindex) {
13313 regtype = spec [MONO_INST_SRC1 + srcindex];
13314 sreg = sregs [srcindex];
13316 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13317 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13318 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13319 MonoInst *use_ins = ins;
13320 MonoInst *load_ins;
13321 guint32 load_opcode;
13323 if (var->opcode == OP_REGVAR) {
13324 sregs [srcindex] = var->dreg;
13325 //mono_inst_set_src_registers (ins, sregs);
13326 live_range_end [sreg] = use_ins;
13327 live_range_end_bb [sreg] = bb;
13329 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13332 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13333 /* var->dreg is a hreg */
13334 tmp->inst_c1 = sreg;
13335 mono_bblock_insert_after_ins (bb, ins, tmp);
13341 g_assert (var->opcode == OP_REGOFFSET);
13343 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13345 g_assert (load_opcode != OP_LOADV_MEMBASE);
13347 if (vreg_to_lvreg [sreg]) {
13348 g_assert (vreg_to_lvreg [sreg] != -1);
13350 /* The variable is already loaded to an lvreg */
13351 if (G_UNLIKELY (cfg->verbose_level > 2))
13352 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13353 sregs [srcindex] = vreg_to_lvreg [sreg];
13354 //mono_inst_set_src_registers (ins, sregs);
13358 /* Try to fuse the load into the instruction */
13359 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13360 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13361 sregs [0] = var->inst_basereg;
13362 //mono_inst_set_src_registers (ins, sregs);
13363 ins->inst_offset = var->inst_offset;
13364 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13365 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13366 sregs [1] = var->inst_basereg;
13367 //mono_inst_set_src_registers (ins, sregs);
13368 ins->inst_offset = var->inst_offset;
13370 if (MONO_IS_REAL_MOVE (ins)) {
13371 ins->opcode = OP_NOP;
13374 //printf ("%d ", srcindex); mono_print_ins (ins);
13376 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13378 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13379 if (var->dreg == prev_dreg) {
13381 * sreg refers to the value loaded by the load
13382 * emitted below, but we need to use ins->dreg
13383 * since it refers to the store emitted earlier.
13387 g_assert (sreg != -1);
13388 vreg_to_lvreg [var->dreg] = sreg;
13389 g_assert (lvregs_len < 1024);
13390 lvregs [lvregs_len ++] = var->dreg;
13394 sregs [srcindex] = sreg;
13395 //mono_inst_set_src_registers (ins, sregs);
13397 #if SIZEOF_REGISTER != 8
13398 if (regtype == 'l') {
13399 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13400 mono_bblock_insert_before_ins (bb, ins, load_ins);
13401 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13402 mono_bblock_insert_before_ins (bb, ins, load_ins);
13403 use_ins = load_ins;
13408 #if SIZEOF_REGISTER == 4
13409 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13411 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13412 mono_bblock_insert_before_ins (bb, ins, load_ins);
13413 use_ins = load_ins;
13417 if (var->dreg < orig_next_vreg) {
13418 live_range_end [var->dreg] = use_ins;
13419 live_range_end_bb [var->dreg] = bb;
13422 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13425 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13426 tmp->inst_c1 = var->dreg;
13427 mono_bblock_insert_after_ins (bb, ins, tmp);
13431 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg pass (see comment there): now that the sregs are processed it is safe to cache the dreg's lvreg. */
13433 if (dest_has_lvreg) {
13434 g_assert (ins->dreg != -1);
13435 vreg_to_lvreg [prev_dreg] = ins->dreg;
13436 g_assert (lvregs_len < 1024);
13437 lvregs [lvregs_len ++] = prev_dreg;
13438 dest_has_lvreg = FALSE;
/* Undo the store dreg/sreg2 swap performed above. */
13442 tmp_reg = ins->dreg;
13443 ins->dreg = ins->sreg2;
13444 ins->sreg2 = tmp_reg;
/* Calls clobber lvregs, so the cache must be flushed across them. */
13447 if (MONO_IS_CALL (ins)) {
13448 /* Clear vreg_to_lvreg array */
13449 for (i = 0; i < lvregs_len; i++)
13450 vreg_to_lvreg [lvregs [i]] = 0;
13452 } else if (ins->opcode == OP_NOP) {
13454 MONO_INST_NULLIFY_SREGS (ins);
13457 if (cfg->verbose_level > 2)
13458 mono_print_ins_index (1, ins);
13461 /* Extend the live range based on the liveness info */
13462 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13463 for (i = 0; i < cfg->num_varinfo; i ++) {
13464 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13466 if (vreg_is_volatile (cfg, vi->vreg))
13467 /* The liveness info is incomplete */
13470 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13471 /* Live from at least the first ins of this bb */
13472 live_range_start [vi->vreg] = bb->code;
13473 live_range_start_bb [vi->vreg] = bb;
13476 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13477 /* Live at least until the last ins of this bb */
13478 live_range_end [vi->vreg] = bb->last_ins;
13479 live_range_end_bb [vi->vreg] = bb;
13485 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13487 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13488 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13490 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13491 for (i = 0; i < cfg->num_varinfo; ++i) {
13492 int vreg = MONO_VARINFO (cfg, i)->vreg;
13495 if (live_range_start [vreg]) {
13496 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13498 ins->inst_c1 = vreg;
13499 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13501 if (live_range_end [vreg]) {
13502 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13504 ins->inst_c1 = vreg;
13505 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13506 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13508 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13514 if (cfg->gsharedvt_locals_var_ins) {
13515 /* Nullify if unused */
13516 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13517 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13520 g_free (live_range_start);
13521 g_free (live_range_end);
13522 g_free (live_range_start_bb);
13523 g_free (live_range_end_bb);
13528 * - use 'iadd' instead of 'int_add'
13529 * - handling ovf opcodes: decompose in method_to_ir.
13530 * - unify iregs/fregs
13531 * -> partly done, the missing parts are:
13532 * - a more complete unification would involve unifying the hregs as well, so
13533 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13534 * would no longer map to the machine hregs, so the code generators would need to
13535 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13536 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13537 * fp/non-fp branches speeds it up by about 15%.
13538 * - use sext/zext opcodes instead of shifts
13540 * - get rid of TEMPLOADs if possible and use vregs instead
13541 * - clean up usage of OP_P/OP_ opcodes
13542 * - cleanup usage of DUMMY_USE
13543 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13545 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13546 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13547 * - make sure handle_stack_args () is called before the branch is emitted
13548 * - when the new IR is done, get rid of all unused stuff
13549 * - COMPARE/BEQ as separate instructions or unify them ?
13550 * - keeping them separate allows specialized compare instructions like
13551 * compare_imm, compare_membase
13552 * - most back ends unify fp compare+branch, fp compare+ceq
13553 * - integrate mono_save_args into inline_method
13554 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13555 * - handle long shift opts on 32 bit platforms somehow: they require
13556 * 3 sregs (2 for arg1 and 1 for arg2)
13557 * - make byref a 'normal' type.
13558 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13559 * variable if needed.
13560 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13561 * like inline_method.
13562 * - remove inlining restrictions
13563 * - fix LNEG and enable cfold of INEG
13564 * - generalize x86 optimizations like ldelema as a peephole optimization
13565 * - add store_mem_imm for amd64
13566 * - optimize the loading of the interruption flag in the managed->native wrappers
13567 * - avoid special handling of OP_NOP in passes
13568 * - move code inserting instructions into one function/macro.
13569 * - try a coalescing phase after liveness analysis
13570 * - add float -> vreg conversion + local optimizations on !x86
13571 * - figure out how to handle decomposed branches during optimizations, ie.
13572 * compare+branch, op_jump_table+op_br etc.
13573 * - promote RuntimeXHandles to vregs
13574 * - vtype cleanups:
13575 * - add a NEW_VARLOADA_VREG macro
13576 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13577 * accessing vtype fields.
13578 * - get rid of I8CONST on 64 bit platforms
13579 * - dealing with the increase in code size due to branches created during opcode
13581 * - use extended basic blocks
13582 * - all parts of the JIT
13583 * - handle_global_vregs () && local regalloc
13584 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13585 * - sources of increase in code size:
13588 * - isinst and castclass
13589 * - lvregs not allocated to global registers even if used multiple times
13590 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13592 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13593 * - add all micro optimizations from the old JIT
13594 * - put tree optimizations into the deadce pass
13595 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13596 * specific function.
13597 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13598 * fcompare + branchCC.
13599 * - create a helper function for allocating a stack slot, taking into account
13600 * MONO_CFG_HAS_SPILLUP.
13602 * - merge the ia64 switch changes.
13603 * - optimize mono_regstate2_alloc_int/float.
13604 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13605 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13606 * parts of the tree could be separated by other instructions, killing the tree
13607 * arguments, or stores killing loads etc. Also, should we fold loads into other
13608 * instructions if the result of the load is used multiple times ?
13609 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13610 * - LAST MERGE: 108395.
13611 * - when returning vtypes in registers, generate IR and append it to the end of the
13612 * last bb instead of doing it in the epilog.
13613 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13621 - When to decompose opcodes:
13622 - earlier: this makes some optimizations hard to implement, since the low level IR
13623 no longer contains the necessary information. But it is easier to do.
13624 - later: harder to implement, enables more optimizations.
13625 - Branches inside bblocks:
13626 - created when decomposing complex opcodes.
13627 - branches to another bblock: harmless, but not tracked by the branch
13628 optimizations, so need to branch to a label at the start of the bblock.
13629 - branches to inside the same bblock: very problematic, trips up the local
13630 reg allocator. Can be fixed by splitting the current bblock, but that is a
13631 complex operation, since some local vregs can become global vregs etc.
13632 - Local/global vregs:
13633 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13634 local register allocator.
13635 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13636 structure, created by mono_create_var (). Assigned to hregs or the stack by
13637 the global register allocator.
13638 - When to do optimizations like alu->alu_imm:
13639 - earlier -> saves work later on since the IR will be smaller/simpler
13640 - later -> can work on more instructions
13641 - Handling of valuetypes:
13642 - When a vtype is pushed on the stack, a new temporary is created, an
13643 instruction computing its address (LDADDR) is emitted and pushed on
13644 the stack. Need to optimize cases when the vtype is used immediately as in
13645 argument passing, stloc etc.
13646 - Instead of the to_end stuff in the old JIT, simply call the function handling
13647 the values on the stack before emitting the last instruction of the bb.
13650 #endif /* DISABLE_JIT */