2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
24 #ifdef HAVE_SYS_TIME_H
32 #include <mono/utils/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/attrdefs.h>
36 #include <mono/metadata/loader.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/class.h>
39 #include <mono/metadata/object.h>
40 #include <mono/metadata/exception.h>
41 #include <mono/metadata/opcodes.h>
42 #include <mono/metadata/mono-endian.h>
43 #include <mono/metadata/tokentype.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/marshal.h>
46 #include <mono/metadata/debug-helpers.h>
47 #include <mono/metadata/mono-debug.h>
48 #include <mono/metadata/gc-internal.h>
49 #include <mono/metadata/security-manager.h>
50 #include <mono/metadata/threads-types.h>
51 #include <mono/metadata/security-core-clr.h>
52 #include <mono/metadata/monitor.h>
53 #include <mono/metadata/profiler-private.h>
54 #include <mono/metadata/profiler.h>
55 #include <mono/utils/mono-compiler.h>
56 #include <mono/utils/mono-memory-model.h>
57 #include <mono/metadata/mono-basic-block.h>
64 #include "jit-icalls.h"
66 #include "debugger-agent.h"
68 #define BRANCH_COST 10
69 #define INLINE_LENGTH_LIMIT 20
70 #define INLINE_FAILURE do {\
71 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
74 #define CHECK_CFG_EXCEPTION do {\
75 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
78 #define METHOD_ACCESS_FAILURE do { \
79 char *method_fname = mono_method_full_name (method, TRUE); \
80 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
81 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
82 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
83 g_free (method_fname); \
84 g_free (cil_method_fname); \
85 goto exception_exit; \
87 #define FIELD_ACCESS_FAILURE do { \
88 char *method_fname = mono_method_full_name (method, TRUE); \
89 char *field_fname = mono_field_full_name (field); \
90 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
91 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
92 g_free (method_fname); \
93 g_free (field_fname); \
94 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 if (cfg->generic_sharing_context) { \
98 if (cfg->verbose_level > 2) \
99 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
100 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
101 goto exception_exit; \
104 #define OUT_OF_MEMORY_FAILURE do { \
105 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
106 goto exception_exit; \
108 /* Determine whether 'ins' represents a load of the 'this' argument */
109 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in this file:
 * map CEE_LDIND_*/CEE_STIND_* opcodes to their *_MEMBASE load/store forms. */
111 static int ldind_to_load_membase (int opcode);
112 static int stind_to_store_membase (int opcode);
/* Map a three-address opcode to its immediate-operand form.
 * NOTE(review): the _noemul variant presumably excludes opcodes that are
 * software-emulated on this target — confirm against mini.c. */
114 int mono_op_to_op_imm (int opcode);
115 int mono_op_to_op_imm_noemul (int opcode);
/* Emission helpers shared with the backends: direct native calls, and
 * value-type store/initialization (stobj/initobj). */
117 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
118 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
119 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
121 /* helper methods signatures */
/* Cached icall signatures for the runtime trampolines.  All are created once
 * in mono_create_helper_signatures () below and never freed. */
122 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
123 static MonoMethodSignature *helper_sig_domain_get = NULL;
124 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
126 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
127 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
128 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
131 * Instruction metadata
139 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
140 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
146 #if SIZEOF_REGISTER == 8
151 /* keep in sync with the enum in mini.h */
154 #include "mini-ops.h"
159 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
160 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
162 * This should contain the index of the last sreg + 1. This is not the same
163 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
165 const gint8 ins_sreg_counts[] = {
166 #include "mini-ops.h"
171 #define MONO_INIT_VARINFO(vi,id) do { \
172 (vi)->range.first_use.pos.bid = 0xffff; \
178 mono_inst_set_src_registers (MonoInst *ins, int *regs)
180 ins->sreg1 = regs [0];
181 ins->sreg2 = regs [1];
182 ins->sreg3 = regs [2];
/* Public wrappers around the static vreg allocators (alloc_ireg & friends).
 * Each returns a fresh virtual register number of the given kind. */
186 mono_alloc_ireg (MonoCompile *cfg)
188 	return alloc_ireg (cfg);
/* Allocate a floating-point virtual register. */
192 mono_alloc_freg (MonoCompile *cfg)
194 	return alloc_freg (cfg);
/* Allocate a pointer-sized virtual register. */
198 mono_alloc_preg (MonoCompile *cfg)
200 	return alloc_preg (cfg);
/* Allocate a destination register appropriate for the given eval-stack type. */
204 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
206 	return alloc_dreg (cfg, stack_type);
210  * mono_alloc_ireg_ref:
212  *   Allocate an IREG, and mark it as holding a GC ref.
215 mono_alloc_ireg_ref (MonoCompile *cfg)
217 	return alloc_ireg_ref (cfg);
221  * mono_alloc_ireg_mp:
223  *   Allocate an IREG, and mark it as holding a managed pointer.
226 mono_alloc_ireg_mp (MonoCompile *cfg)
228 	return alloc_ireg_mp (cfg);
232  * mono_alloc_ireg_copy:
234  *   Allocate an IREG with the same GC type as VREG.
237 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC classification (ref / managed pointer / plain int)
 * of VREG to the newly allocated register. */
239 	if (vreg_is_ref (cfg, vreg))
240 		return alloc_ireg_ref (cfg);
241 	else if (vreg_is_mp (cfg, vreg))
242 		return alloc_ireg_mp (cfg);
244 		return alloc_ireg (cfg);
248 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
254 switch (type->type) {
257 case MONO_TYPE_BOOLEAN:
269 case MONO_TYPE_FNPTR:
271 case MONO_TYPE_CLASS:
272 case MONO_TYPE_STRING:
273 case MONO_TYPE_OBJECT:
274 case MONO_TYPE_SZARRAY:
275 case MONO_TYPE_ARRAY:
279 #if SIZEOF_REGISTER == 8
288 case MONO_TYPE_VALUETYPE:
289 if (type->data.klass->enumtype) {
290 type = mono_class_enum_basetype (type->data.klass);
293 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
296 case MONO_TYPE_TYPEDBYREF:
298 case MONO_TYPE_GENERICINST:
299 type = &type->data.generic_class->container_class->byval_arg;
303 g_assert (cfg->generic_sharing_context);
306 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block — its predecessor ("IN") and successor
 * ("OUT") edges with block numbers and DFNs, followed by its instructions. */
312 mono_print_bb (MonoBasicBlock *bb, const char *msg)
317 	printf ("\n%s %d: [IN: ", msg, bb->block_num);
318 	for (i = 0; i < bb->in_count; ++i)
319 		printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
321 	for (i = 0; i < bb->out_count; ++i)
322 		printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Print every instruction in the block, in emission order. */
324 	for (tree = bb->code; tree; tree = tree->next)
325 		mono_print_ins_index (-1, tree);
/* Create (once) the icall signatures used when emitting calls to the runtime
 * trampolines.  The signature strings are "<ret> <args...>" as parsed by
 * mono_create_icall_signature ().  The _llvm variants take an explicit
 * argument because LLVM-compiled code cannot pass it implicitly. */
329 mono_create_helper_signatures (void)
331 	helper_sig_domain_get = mono_create_icall_signature ("ptr");
332 	helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
333 	helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
334 	helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
335 	helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
336 	helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
337 	helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
341 * Can't put this at the beginning, since other files reference stuff from this
346 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
348 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
350 #define GET_BBLOCK(cfg,tblock,ip) do { \
351 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
353 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
354 NEW_BBLOCK (cfg, (tblock)); \
355 (tblock)->cil_code = (ip); \
356 ADD_BBLOCK (cfg, (tblock)); \
360 #if defined(TARGET_X86) || defined(TARGET_AMD64)
361 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
362 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
363 (dest)->dreg = alloc_ireg_mp ((cfg)); \
364 (dest)->sreg1 = (sr1); \
365 (dest)->sreg2 = (sr2); \
366 (dest)->inst_imm = (imm); \
367 (dest)->backend.shift_amount = (shift); \
368 MONO_ADD_INS ((cfg)->cbb, (dest)); \
372 #if SIZEOF_REGISTER == 8
373 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
374 /* FIXME: Need to add many more cases */ \
375 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
377 int dr = alloc_preg (cfg); \
378 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
379 (ins)->sreg2 = widen->dreg; \
383 #define ADD_WIDEN_OP(ins, arg1, arg2)
386 #define ADD_BINOP(op) do { \
387 MONO_INST_NEW (cfg, ins, (op)); \
389 ins->sreg1 = sp [0]->dreg; \
390 ins->sreg2 = sp [1]->dreg; \
391 type_from_op (ins, sp [0], sp [1]); \
393 /* Have to insert a widening op */ \
394 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
395 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
396 MONO_ADD_INS ((cfg)->cbb, (ins)); \
397 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
400 #define ADD_UNOP(op) do { \
401 MONO_INST_NEW (cfg, ins, (op)); \
403 ins->sreg1 = sp [0]->dreg; \
404 type_from_op (ins, sp [0], NULL); \
406 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
407 MONO_ADD_INS ((cfg)->cbb, (ins)); \
408 *sp++ = mono_decompose_opcode (cfg, ins); \
411 #define ADD_BINCOND(next_block) do { \
414 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
415 cmp->sreg1 = sp [0]->dreg; \
416 cmp->sreg2 = sp [1]->dreg; \
417 type_from_op (cmp, sp [0], sp [1]); \
419 type_from_op (ins, sp [0], sp [1]); \
420 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
421 GET_BBLOCK (cfg, tblock, target); \
422 link_bblock (cfg, bblock, tblock); \
423 ins->inst_true_bb = tblock; \
424 if ((next_block)) { \
425 link_bblock (cfg, bblock, (next_block)); \
426 ins->inst_false_bb = (next_block); \
427 start_new_bblock = 1; \
429 GET_BBLOCK (cfg, tblock, ip); \
430 link_bblock (cfg, bblock, tblock); \
431 ins->inst_false_bb = tblock; \
432 start_new_bblock = 2; \
434 if (sp != stack_start) { \
435 handle_stack_args (cfg, stack_start, sp - stack_start); \
436 CHECK_UNVERIFIABLE (cfg); \
438 MONO_ADD_INS (bblock, cmp); \
439 MONO_ADD_INS (bblock, ins); \
443  * link_bblock: Links two basic blocks
445  * Links two basic blocks in the control flow graph: the 'from'
446  * argument is the starting block and the 'to' argument is the block
447  * control flow transfers to after 'from'.  Adding an already-present
 * edge is a no-op; the out_bb/in_bb arrays are grown by one (copied into
 * fresh mempool allocations) when a new edge is added.
450 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
452 	MonoBasicBlock **newa;
/* Verbose tracing of the edge being added, in IL-offset terms.  NULL
 * cil_code means the synthetic entry/exit block. */
456 	if (from->cil_code) {
458 			printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
460 			printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
463 			printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
465 			printf ("edge from entry to exit\n");
/* Check whether the forward edge already exists. */
470 	for (i = 0; i < from->out_count; ++i) {
471 		if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot (mempool memory is never freed individually). */
477 		newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
478 		for (i = 0; i < from->out_count; ++i) {
479 			newa [i] = from->out_bb [i];
/* Same dance for the backward edge in to->in_bb. */
487 	for (i = 0; i < to->in_count; ++i) {
488 		if (from == to->in_bb [i]) {
494 		newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
495 		for (i = 0; i < to->in_count; ++i) {
496 			newa [i] = to->in_bb [i];
505 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
507 link_bblock (cfg, from, to);
511  * mono_find_block_region:
513  *   We mark each basic block with a region ID. We use that to avoid BB
514  *   optimizations when blocks are in different regions.
517  *   A region token that encodes where this region is, and information
518  *   about the clause owner for this block.
520  *   The region encodes the try/catch/filter clause that owns this block
521  *   as well as the type. -1 is a special value that represents a block
522  *   that is in none of try/catch/filter.
 *
 * Token layout (from the returns below): (clause_index + 1) << 8, OR'ed with
 * a MONO_REGION_* kind and the clause flags.
525 mono_find_block_region (MonoCompile *cfg, int offset)
527 	MonoMethodHeader *header = cfg->header;
528 	MonoExceptionClause *clause;
/* Scan the method's EH clauses in order; the first clause containing
 * 'offset' determines the region. */
531 	for (i = 0; i < header->num_clauses; ++i) {
532 		clause = &header->clauses [i];
/* A filter's code runs from filter_offset up to its handler_offset. */
533 		if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
534 		    (offset < (clause->handler_offset)))
535 			return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler: distinguish finally/fault/catch regions. */
537 		if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
538 			if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
539 				return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
540 			else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
541 				return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
543 				return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
546 		if (MONO_OFFSET_IN_CLAUSE (clause, offset))
547 			return ((i + 1) << 8) | clause->flags;
/* Collect the EH clauses of kind 'type' whose protected region contains 'ip'
 * but not 'target' — i.e. the clauses that a branch from ip to target exits
 * (e.g. the finally blocks that must run on a leave).  Results are appended
 * to a GList in clause order. */
554 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
556 	MonoMethodHeader *header = cfg->header;
557 	MonoExceptionClause *clause;
561 	for (i = 0; i < header->num_clauses; ++i) {
562 		clause = &header->clauses [i];
563 		if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
564 		    (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
565 			if (clause->flags == type)
566 				res = g_list_append (res, clause);
/* Get-or-create the stack-pointer variable for an EH region.  Cached in the
 * cfg->spvars hash, keyed by region id. */
573 mono_create_spvar_for_region (MonoCompile *cfg, int region)
577 	var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
/* Not cached yet: create an int-sized local for it. */
581 	var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
582 	/* prevent it from being register allocated */
583 	var->flags |= MONO_INST_INDIRECT;
585 	g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for an IL offset. */
589 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
591 	return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception-object variable for an IL offset.  Cached in
 * cfg->exvars; same non-register-allocatable treatment as spvars above. */
595 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
599 	var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
603 	var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
604 	/* prevent it from being register allocated */
605 	var->flags |= MONO_INST_INDIRECT;
607 	g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
613 * Returns the type used in the eval stack when @type is loaded.
614 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
617 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
621 inst->klass = klass = mono_class_from_mono_type (type);
623 inst->type = STACK_MP;
628 switch (type->type) {
630 inst->type = STACK_INV;
634 case MONO_TYPE_BOOLEAN:
640 inst->type = STACK_I4;
645 case MONO_TYPE_FNPTR:
646 inst->type = STACK_PTR;
648 case MONO_TYPE_CLASS:
649 case MONO_TYPE_STRING:
650 case MONO_TYPE_OBJECT:
651 case MONO_TYPE_SZARRAY:
652 case MONO_TYPE_ARRAY:
653 inst->type = STACK_OBJ;
657 inst->type = STACK_I8;
661 inst->type = STACK_R8;
663 case MONO_TYPE_VALUETYPE:
664 if (type->data.klass->enumtype) {
665 type = mono_class_enum_basetype (type->data.klass);
669 inst->type = STACK_VTYPE;
672 case MONO_TYPE_TYPEDBYREF:
673 inst->klass = mono_defaults.typed_reference_class;
674 inst->type = STACK_VTYPE;
676 case MONO_TYPE_GENERICINST:
677 type = &type->data.generic_class->container_class->byval_arg;
680 case MONO_TYPE_MVAR :
681 /* FIXME: all the arguments must be references for now,
682 * later look inside cfg and see if the arg num is
685 g_assert (cfg->generic_sharing_context);
686 inst->type = STACK_OBJ;
689 g_error ("unknown type 0x%02x in eval stack type", type->type);
694 * The following tables are used to quickly validate the IL code in type_from_op ().
697 bin_num_table [STACK_MAX] [STACK_MAX] = {
698 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
710 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
713 /* reduce the size of this table */
715 bin_int_table [STACK_MAX] [STACK_MAX] = {
716 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
721 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
722 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
723 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
727 bin_comp_table [STACK_MAX] [STACK_MAX] = {
728 /* Inv i L p F & O vt */
730 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
731 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
732 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
733 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
734 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
735 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
736 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
739 /* reduce the size of this table */
741 shift_table [STACK_MAX] [STACK_MAX] = {
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
753 * Tables to map from the non-specific opcode to the matching
754 * type-specific opcode.
756 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
758 binops_op_map [STACK_MAX] = {
759 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
762 /* handles from CEE_NEG to CEE_CONV_U8 */
764 unops_op_map [STACK_MAX] = {
765 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
768 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
770 ovfops_op_map [STACK_MAX] = {
771 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
774 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
776 ovf2ops_op_map [STACK_MAX] = {
777 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
780 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
782 ovf3ops_op_map [STACK_MAX] = {
783 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
786 /* handles from CEE_BEQ to CEE_BLT_UN */
788 beqops_op_map [STACK_MAX] = {
789 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
792 /* handles from CEE_CEQ to CEE_CLT_UN */
794 ceqops_op_map [STACK_MAX] = {
795 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
799 * Sets ins->type (the type on the eval stack) according to the
800 * type of the opcode and the arguments to it.
801 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
803 * FIXME: this function sets ins->type unconditionally in some cases, but
804 * it should set it to invalid for some types (a conv.x on an object)
807 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
809 switch (ins->opcode) {
816 /* FIXME: check unverifiable args for STACK_MP */
817 ins->type = bin_num_table [src1->type] [src2->type];
818 ins->opcode += binops_op_map [ins->type];
825 ins->type = bin_int_table [src1->type] [src2->type];
826 ins->opcode += binops_op_map [ins->type];
831 ins->type = shift_table [src1->type] [src2->type];
832 ins->opcode += binops_op_map [ins->type];
837 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
838 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
839 ins->opcode = OP_LCOMPARE;
840 else if (src1->type == STACK_R8)
841 ins->opcode = OP_FCOMPARE;
843 ins->opcode = OP_ICOMPARE;
845 case OP_ICOMPARE_IMM:
846 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
847 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
848 ins->opcode = OP_LCOMPARE_IMM;
860 ins->opcode += beqops_op_map [src1->type];
863 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
864 ins->opcode += ceqops_op_map [src1->type];
870 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
871 ins->opcode += ceqops_op_map [src1->type];
875 ins->type = neg_table [src1->type];
876 ins->opcode += unops_op_map [ins->type];
879 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
880 ins->type = src1->type;
882 ins->type = STACK_INV;
883 ins->opcode += unops_op_map [ins->type];
889 ins->type = STACK_I4;
890 ins->opcode += unops_op_map [src1->type];
893 ins->type = STACK_R8;
894 switch (src1->type) {
897 ins->opcode = OP_ICONV_TO_R_UN;
900 ins->opcode = OP_LCONV_TO_R_UN;
904 case CEE_CONV_OVF_I1:
905 case CEE_CONV_OVF_U1:
906 case CEE_CONV_OVF_I2:
907 case CEE_CONV_OVF_U2:
908 case CEE_CONV_OVF_I4:
909 case CEE_CONV_OVF_U4:
910 ins->type = STACK_I4;
911 ins->opcode += ovf3ops_op_map [src1->type];
913 case CEE_CONV_OVF_I_UN:
914 case CEE_CONV_OVF_U_UN:
915 ins->type = STACK_PTR;
916 ins->opcode += ovf2ops_op_map [src1->type];
918 case CEE_CONV_OVF_I1_UN:
919 case CEE_CONV_OVF_I2_UN:
920 case CEE_CONV_OVF_I4_UN:
921 case CEE_CONV_OVF_U1_UN:
922 case CEE_CONV_OVF_U2_UN:
923 case CEE_CONV_OVF_U4_UN:
924 ins->type = STACK_I4;
925 ins->opcode += ovf2ops_op_map [src1->type];
928 ins->type = STACK_PTR;
929 switch (src1->type) {
931 ins->opcode = OP_ICONV_TO_U;
935 #if SIZEOF_VOID_P == 8
936 ins->opcode = OP_LCONV_TO_U;
938 ins->opcode = OP_MOVE;
942 ins->opcode = OP_LCONV_TO_U;
945 ins->opcode = OP_FCONV_TO_U;
951 ins->type = STACK_I8;
952 ins->opcode += unops_op_map [src1->type];
954 case CEE_CONV_OVF_I8:
955 case CEE_CONV_OVF_U8:
956 ins->type = STACK_I8;
957 ins->opcode += ovf3ops_op_map [src1->type];
959 case CEE_CONV_OVF_U8_UN:
960 case CEE_CONV_OVF_I8_UN:
961 ins->type = STACK_I8;
962 ins->opcode += ovf2ops_op_map [src1->type];
966 ins->type = STACK_R8;
967 ins->opcode += unops_op_map [src1->type];
970 ins->type = STACK_R8;
974 ins->type = STACK_I4;
975 ins->opcode += ovfops_op_map [src1->type];
980 ins->type = STACK_PTR;
981 ins->opcode += ovfops_op_map [src1->type];
989 ins->type = bin_num_table [src1->type] [src2->type];
990 ins->opcode += ovfops_op_map [src1->type];
991 if (ins->type == STACK_R8)
992 ins->type = STACK_INV;
994 case OP_LOAD_MEMBASE:
995 ins->type = STACK_PTR;
997 case OP_LOADI1_MEMBASE:
998 case OP_LOADU1_MEMBASE:
999 case OP_LOADI2_MEMBASE:
1000 case OP_LOADU2_MEMBASE:
1001 case OP_LOADI4_MEMBASE:
1002 case OP_LOADU4_MEMBASE:
1003 ins->type = STACK_PTR;
1005 case OP_LOADI8_MEMBASE:
1006 ins->type = STACK_I8;
1008 case OP_LOADR4_MEMBASE:
1009 case OP_LOADR8_MEMBASE:
1010 ins->type = STACK_R8;
1013 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1017 if (ins->type == STACK_MP)
1018 ins->klass = mono_defaults.object_class;
1023 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1029 param_table [STACK_MAX] [STACK_MAX] = {
1034 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1038 switch (args->type) {
1048 for (i = 0; i < sig->param_count; ++i) {
1049 switch (args [i].type) {
1053 if (!sig->params [i]->byref)
1057 if (sig->params [i]->byref)
1059 switch (sig->params [i]->type) {
1060 case MONO_TYPE_CLASS:
1061 case MONO_TYPE_STRING:
1062 case MONO_TYPE_OBJECT:
1063 case MONO_TYPE_SZARRAY:
1064 case MONO_TYPE_ARRAY:
1071 if (sig->params [i]->byref)
1073 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1082 /*if (!param_table [args [i].type] [sig->params [i]->type])
1090  * When we need a pointer to the current domain many times in a method, we
1091  * call mono_domain_get() once and we store the result in a local variable.
1092  * This function returns the variable that represents the MonoDomain*.
1094 inline static MonoInst *
1095 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable (an int-sized local). */
1097 	if (!cfg->domainvar)
1098 		cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1099 	return cfg->domainvar;
1103  * The got_var contains the address of the Global Offset Table when AOT
 * compiling.  Only meaningful on targets that define MONO_ARCH_NEED_GOT_VAR.
1107 mono_get_got_var (MonoCompile *cfg)
1109 #ifdef MONO_ARCH_NEED_GOT_VAR
/* The GOT is only used for AOT code. */
1110 	if (!cfg->compile_aot)
1112 	if (!cfg->got_var) {
1113 		cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1115 	return cfg->got_var;
/* Get-or-create the variable holding the runtime generic context (rgctx);
 * only valid when compiling with generic sharing enabled. */
1122 mono_get_vtable_var (MonoCompile *cfg)
1124 	g_assert (cfg->generic_sharing_context);
1126 	if (!cfg->rgctx_var) {
1127 		cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 		/* force the var to be stack allocated */
1129 		cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1132 	return cfg->rgctx_var;
/* Map an instruction's eval-stack type (STACK_*) back to a canonical MonoType.
 * For object/valuetype entries the instruction's klass supplies the type;
 * unhandled stack types abort via g_error. */
1136 type_from_stack_type (MonoInst *ins) {
1137 	switch (ins->type) {
1138 	case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1139 	case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1140 	case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1141 	case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the klass's byref "this" argument type. */
1143 			return &ins->klass->this_arg;
1144 	case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1145 	case STACK_VTYPE: return &ins->klass->byval_arg;
1147 		g_error ("stack type %d to monotype not handled\n", ins->type);
1152 static G_GNUC_UNUSED int
1153 type_to_stack_type (MonoType *t)
1155 t = mono_type_get_underlying_type (t);
1159 case MONO_TYPE_BOOLEAN:
1162 case MONO_TYPE_CHAR:
1169 case MONO_TYPE_FNPTR:
1171 case MONO_TYPE_CLASS:
1172 case MONO_TYPE_STRING:
1173 case MONO_TYPE_OBJECT:
1174 case MONO_TYPE_SZARRAY:
1175 case MONO_TYPE_ARRAY:
1183 case MONO_TYPE_VALUETYPE:
1184 case MONO_TYPE_TYPEDBYREF:
1186 case MONO_TYPE_GENERICINST:
1187 if (mono_type_generic_inst_is_valuetype (t))
1193 g_assert_not_reached ();
1200 array_access_to_klass (int opcode)
1204 return mono_defaults.byte_class;
1206 return mono_defaults.uint16_class;
1209 return mono_defaults.int_class;
1212 return mono_defaults.sbyte_class;
1215 return mono_defaults.int16_class;
1218 return mono_defaults.int32_class;
1220 return mono_defaults.uint32_class;
1223 return mono_defaults.int64_class;
1226 return mono_defaults.single_class;
1229 return mono_defaults.double_class;
1230 case CEE_LDELEM_REF:
1231 case CEE_STELEM_REF:
1232 return mono_defaults.object_class;
1234 g_assert_not_reached ();
1240  * We try to share variables when possible
 *
 * Returns a local variable to hold the value in stack slot 'slot' at a block
 * boundary.  Variables for the same (slot, stack type) pair are cached in
 * cfg->intvars so join points reuse the same local.
1243 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1248 	/* inlining can result in deeper stacks */
1249 	if (slot >= cfg->header->max_stack)
1250 		return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache: one entry per (type, slot) pair. */
1252 	pos = ins->type - 1 + slot * STACK_MAX;
1254 	switch (ins->type) {
/* Cache hit: return the previously created variable. */
1261 		if ((vnum = cfg->intvars [pos]))
1262 			return cfg->varinfo [vnum];
1263 		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1264 		cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh variable. */
1267 		res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for 'key' in cfg->token_info_hash so the AOT
 * compiler can later emit a token reference instead of a direct pointer. */
1273 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1276 	 * Don't use this if a generic_context is set, since that means AOT can't
1277 	 * look up the method using just the image+token.
1278 	 * table == 0 means this is a reference made from a wrapper.
1280 	if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1281 		MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1282 		jump_info_token->image = image;
1283 		jump_info_token->token = token;
1284 		g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1289  * This function is called to handle items that are left on the evaluation stack
1290  * at basic block boundaries. What happens is that we save the values to local variables
1291  * and we reload them later when first entering the target basic block (with the
1292  * handle_loaded_temps () function).
1293  * A single join point will use the same variables (stored in the array bb->out_stack or
1294  * bb->in_stack, if the basic block is before or after the join point).
1296  * This function needs to be called _before_ emitting the last instruction of
1297  * the bb (i.e. before emitting a branch).
1298  * If the stack merge fails at a join point, cfg->unverifiable is set.
1301 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1304 	MonoBasicBlock *bb = cfg->cbb;
1305 	MonoBasicBlock *outb;
1306 	MonoInst *inst, **locals;
1311 	if (cfg->verbose_level > 3)
1312 		printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time this block spills its stack: pick the out_stack variables. */
1313 	if (!bb->out_scount) {
1314 		bb->out_scount = count;
1315 		//printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack as our out_stack. */
1317 		for (i = 0; i < bb->out_count; ++i) {
1318 			outb = bb->out_bb [i];
1319 			/* exception handlers are linked, but they should not be considered for stack args */
1320 			if (outb->flags & BB_EXCEPTION_HANDLER)
1322 			//printf (" %d", outb->block_num);
1323 			if (outb->in_stack) {
1325 				bb->out_stack = outb->in_stack;
/* No successor had one: allocate a fresh out_stack array. */
1331 			bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1332 			for (i = 0; i < count; ++i) {
1334 				 * try to reuse temps already allocated for this purpose, if they occupy the same
1335 				 * stack slot and if they are of the same type.
1336 				 * This won't cause conflicts since if 'local' is used to
1337 				 * store one of the values in the in_stack of a bblock, then
1338 				 * the same variable will be used for the same outgoing stack
1340 				 * This doesn't work when inlining methods, since the bblocks
1341 				 * in the inlined methods do not inherit their in_stack from
1342 				 * the bblock they are inlined to. See bug #58863 for an
1345 				if (cfg->inlined_method)
1346 					bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1348 					bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors; mismatched depths at a join point
 * make the method unverifiable. */
1353 	for (i = 0; i < bb->out_count; ++i) {
1354 		outb = bb->out_bb [i];
1355 		/* exception handlers are linked, but they should not be considered for stack args */
1356 		if (outb->flags & BB_EXCEPTION_HANDLER)
1358 		if (outb->in_scount) {
1359 			if (outb->in_scount != bb->out_scount) {
1360 				cfg->unverifiable = TRUE;
1363 			continue; /* check they are the same locals */
1365 		outb->in_scount = count;
1366 		outb->in_stack = bb->out_stack;
1369 	locals = bb->out_stack;
/* Emit the stores that spill the eval stack into the shared locals, and
 * replace the stack entries with the locals themselves. */
1371 	for (i = 0; i < count; ++i) {
1372 		EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1373 		inst->cil_code = sp [i]->cil_code;
1374 		sp [i] = locals [i];
1375 		if (cfg->verbose_level > 3)
1376 			printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1380 	 * It is possible that the out bblocks already have in_stack assigned, and
1381 	 * the in_stacks differ. In this case, we will store to all the different
1388 	/* Find a bblock which has a different in_stack */
1390 	while (bindex < bb->out_count) {
1391 		outb = bb->out_bb [bindex];
1392 		/* exception handlers are linked, but they should not be considered for stack args */
1393 		if (outb->flags & BB_EXCEPTION_HANDLER) {
/* This successor uses a different variable set: store there too. */
1397 		if (outb->in_stack != locals) {
1398 			for (i = 0; i < count; ++i) {
1399 				EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1400 				inst->cil_code = sp [i]->cil_code;
1401 				sp [i] = locals [i];
1402 				if (cfg->verbose_level > 3)
1403 					printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1405 			locals = outb->in_stack;
1414 /* Emit code which loads interface_offsets [klass->interface_id]
1415 * The array is stored in memory before vtable.
 * Under AOT the adjusted interface id is not a compile-time constant, so it is
 * materialized via an AOT constant and added to vtable_reg before the load;
 * otherwise the slot is loaded from a fixed negative offset off the vtable.
 */
1418 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1420 if (cfg->compile_aot) {
1421 int ioffset_reg = alloc_preg (cfg);
1422 int iid_reg = alloc_preg (cfg);
1424 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1425 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: the offset is a compile-time constant before the vtable start. */
1429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit code leaving a nonzero value in INTF_BIT_REG iff the interface bitmap
 * found at BASE_REG + OFFSET has the bit for KLASS's interface id set.
 * Three strategies, chosen at compile time (interior lines elided in this
 * listing): compressed bitmap via an icall, AOT with a runtime-resolved iid,
 * or a direct constant byte test.
 */
1434 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1436 int ibitmap_reg = alloc_preg (cfg);
1437 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: defer the bit test to the mono_class_interface_match icall. */
1439 MonoInst *res, *ins;
1440 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1441 MONO_ADD_INS (cfg->cbb, ins);
1443 if (cfg->compile_aot)
1444 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1446 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1447 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1450 int ibitmap_byte_reg = alloc_preg (cfg);
1452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1454 if (cfg->compile_aot) {
/* AOT: iid is only known at runtime, so compute byte index (iid >> 3) and
 * bit mask (1 << (iid & 7)) with emitted code. */
1455 int iid_reg = alloc_preg (cfg);
1456 int shifted_iid_reg = alloc_preg (cfg);
1457 int ibitmap_byte_address_reg = alloc_preg (cfg);
1458 int masked_iid_reg = alloc_preg (cfg);
1459 int iid_one_bit_reg = alloc_preg (cfg);
1460 int iid_bit_reg = alloc_preg (cfg);
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1463 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1466 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1467 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1468 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is a constant, so both byte offset and mask fold to immediates. */
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1477 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1478 * stored in "klass_reg" implements the interface "klass".
 * Thin wrapper: checks the bitmap embedded in MonoClass.
 */
1481 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1483 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1487 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1488 * stored in "vtable_reg" implements the interface "klass".
 * Thin wrapper: checks the bitmap embedded in MonoVTable.
 */
1491 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1493 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1497 * Emit code which checks whenever the interface id of @klass is smaller than
1498 * than the value given by max_iid_reg.
 * On failure either branches to FALSE_TARGET (when non-NULL, elided line) or
 * throws InvalidCastException.
 */
1501 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1502 MonoBasicBlock *false_target)
1504 if (cfg->compile_aot) {
/* AOT: interface id resolved at runtime via an AOT constant. */
1505 int iid_reg = alloc_preg (cfg);
1506 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1507 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1512 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1514 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1517 /* Same as above, but obtains max_iid from a vtable */
1519 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1520 MonoBasicBlock *false_target)
1522 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit unsigned) and delegate the check. */
1524 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1525 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1528 /* Same as above, but obtains max_iid from a klass */
1530 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit unsigned) and delegate the check. */
1535 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1536 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subtype test for the class in KLASS_REG against KLASS
 * using the supertypes table: branch to TRUE_TARGET when
 * supertypes [klass->idepth - 1] equals KLASS, to FALSE_TARGET when the
 * inheritance depth is too small.  KLASS_INS, when non-NULL, supplies the
 * class to compare against as a runtime value (generic sharing).
 * NOTE(review): interior lines are elided in this listing.
 */
1540 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1542 int idepth_reg = alloc_preg (cfg);
1543 int stypes_reg = alloc_preg (cfg);
1544 int stype = alloc_preg (cfg);
/* Depth check is only needed beyond the statically-sized supertable. */
1546 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1547 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1554 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1555 } else if (cfg->compile_aot) {
1556 int const_reg = alloc_preg (cfg);
1557 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1558 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1562 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subtype test with a compile-time-known KLASS only. */
1566 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1568 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check against the vtable in VTABLE_REG: range-check
 * the interface id, test the interface bitmap, then either branch to
 * TRUE_TARGET (elided condition guards the branch vs. throw) or throw
 * InvalidCastException on a zero bit.
 */
1572 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1574 int intf_reg = alloc_preg (cfg);
1576 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1577 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1582 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1586 * Variant of the above that takes a register to the class, not the vtable.
 * Same interface-cast logic driven off MonoClass instead of MonoVTable.
 */
1589 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1591 int intf_bit_reg = alloc_preg (cfg);
1593 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1594 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1599 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class-equality check of KLASS_REG against KLASS (or against
 * the runtime value KLASS_INST when non-NULL — generic sharing), throwing
 * InvalidCastException on mismatch.  The guard on the first branch is elided
 * in this listing; presumably it tests klass_inst != NULL — TODO confirm.
 */
1603 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1606 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1607 } else if (cfg->compile_aot) {
1608 int const_reg = alloc_preg (cfg);
1609 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1610 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1614 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with a compile-time-known KLASS. */
1618 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1620 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the class in KLASS_REG against KLASS and branch to TARGET with
 * BRANCH_OP on the comparison result (no exception is thrown here).
 */
1624 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1626 if (cfg->compile_aot) {
/* AOT: the class pointer must come from a patched constant. */
1627 int const_reg = alloc_preg (cfg);
1628 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1629 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1631 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1633 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through it. */
1637 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure.  OBJ_REG holds the object (pass -1 to skip
 * the vector-shape check); OBJECT_IS_NULL is the branch target for the cases
 * that succeed trivially.  Arrays are checked by rank + element class
 * (recursively); non-arrays by the supertypes table.
 * NOTE(review): interior lines (including the array/non-array `if`) are
 * elided in this listing.
 */
1640 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1643 int rank_reg = alloc_preg (cfg);
1644 int eclass_reg = alloc_preg (cfg);
1646 g_assert (!klass_inst);
/* Array path: ranks must match exactly... */
1647 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1649 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1650 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* ...then the element (cast) classes are compared, special-casing the
 * object/enum relationships that the CLI array covariance rules allow. */
1651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1652 if (klass->cast_class == mono_defaults.object_class) {
1653 int parent_reg = alloc_preg (cfg);
1654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1655 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1656 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1657 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1658 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1659 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1660 } else if (klass->cast_class == mono_defaults.enum_class) {
1661 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1662 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1663 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1665 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1666 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1669 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1670 /* Check that the object is a vector too */
1671 int bounds_reg = alloc_preg (cfg);
1672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1674 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table check (see mini_emit_isninst_cast_inst). */
1677 int idepth_reg = alloc_preg (cfg);
1678 int stypes_reg = alloc_preg (cfg);
1679 int stype = alloc_preg (cfg);
1681 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1682 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1684 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1688 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with a compile-time-known KLASS only. */
1693 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1695 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline code setting SIZE bytes at DESTREG + OFFSET to VAL (only
 * val == 0 is supported, see the assert), honoring ALIGN.  Small aligned
 * sizes use single immediate stores; larger sizes loop over register stores
 * of decreasing width.  Interior lines (switch labels, loops) are elided in
 * this listing.
 */
1699 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1703 g_assert (val == 0);
/* Fast path: one immediate store when the size fits and is aligned. */
1708 if ((size <= 4) && (size <= align)) {
1711 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1714 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1717 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1719 #if SIZEOF_REGISTER == 8
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value once, then store it repeatedly. */
1727 val_reg = alloc_preg (cfg);
1729 if (SIZEOF_REGISTER == 8)
1730 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1732 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1735 /* This could be optimized further if necessary */
/* Unaligned destination: fall back to byte-wide stores. */
1737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1744 #if !NO_UNALIGNED_ACCESS
1745 if (SIZEOF_REGISTER == 8) {
1747 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1752 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: progressively narrower stores (4, 2, 1 bytes). */
1760 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1765 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline code copying SIZE bytes from SRCREG + SOFFSET to
 * DESTREG + DOFFSET, honoring ALIGN.  Copies in the widest aligned chunks
 * available (8 on 64-bit where unaligned access is allowed, then 4, 2, 1).
 * Interior lines (loops, offset advancement) are elided in this listing.
 */
1777 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1784 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1785 g_assert (size < 10000);
1788 /* This could be optimized further if necessary */
/* Unaligned case: byte-by-byte load/store pairs. */
1790 cur_reg = alloc_preg (cfg);
1791 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1792 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1799 #if !NO_UNALIGNED_ACCESS
1800 if (SIZEOF_REGISTER == 8) {
/* 64-bit: copy 8 bytes at a time while possible. */
1802 cur_reg = alloc_preg (cfg);
1803 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1804 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, then 2-, then 1-byte copies. */
1813 cur_reg = alloc_preg (cfg);
1814 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1815 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1821 cur_reg = alloc_preg (cfg);
1822 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1823 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1829 cur_reg = alloc_preg (cfg);
1830 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1831 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a return TYPE to the call opcode family to use: CALL (int/pointer/
 * object), LCALL (64-bit), FCALL (float), VCALL (valuetype), VOIDCALL — each
 * further selected by CALLI (indirect) / VIRT (virtual).  Enums and generic
 * instances are unwrapped and re-dispatched (switch labels elided in this
 * listing).
 */
1839 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointers regardless of the pointed-to type. */
1842 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1845 type = mini_get_basic_type_from_generic (gsctx, type);
1846 switch (type->type) {
1847 case MONO_TYPE_VOID:
1848 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1851 case MONO_TYPE_BOOLEAN:
1854 case MONO_TYPE_CHAR:
1857 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1861 case MONO_TYPE_FNPTR:
1862 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1863 case MONO_TYPE_CLASS:
1864 case MONO_TYPE_STRING:
1865 case MONO_TYPE_OBJECT:
1866 case MONO_TYPE_SZARRAY:
1867 case MONO_TYPE_ARRAY:
1868 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1871 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1874 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1876 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
1877 if (type->data.klass->enumtype) {
1878 type = mono_class_enum_basetype (type->data.klass);
1880 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1881 case MONO_TYPE_TYPEDBYREF:
1882 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1883 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1884 type = &type->data.generic_class->container_class->byval_arg;
1887 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1893 * target_type_is_incompatible:
1894 * @cfg: MonoCompile context
1896 * Check that the item @arg on the evaluation stack can be stored
1897 * in the target type (can be a local, or field, etc).
1898 * The cfg arg can be used to check if we need verification or just
1901 * Returns: non-0 value if arg can't be stored on a target.
 * (Return statements between the checks are elided in this listing.)
 */
1904 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1906 MonoType *simple_type;
1909 if (target->byref) {
1910 /* FIXME: check that the pointed to types match */
1911 if (arg->type == STACK_MP)
1912 return arg->klass != mono_class_from_mono_type (target);
1913 if (arg->type == STACK_PTR)
/* Dispatch on the underlying (enum-stripped) target type. */
1918 simple_type = mono_type_get_underlying_type (target);
1919 switch (simple_type->type) {
1920 case MONO_TYPE_VOID:
1924 case MONO_TYPE_BOOLEAN:
1927 case MONO_TYPE_CHAR:
1930 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1934 /* STACK_MP is needed when setting pinned locals */
1935 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1940 case MONO_TYPE_FNPTR:
1942 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1943 * in native int. (#688008).
1945 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1948 case MONO_TYPE_CLASS:
1949 case MONO_TYPE_STRING:
1950 case MONO_TYPE_OBJECT:
1951 case MONO_TYPE_SZARRAY:
1952 case MONO_TYPE_ARRAY:
1953 if (arg->type != STACK_OBJ)
1955 /* FIXME: check type compatibility */
1959 if (arg->type != STACK_I8)
1964 if (arg->type != STACK_R8)
1967 case MONO_TYPE_VALUETYPE:
/* Valuetypes must match both the stack kind and the exact class. */
1968 if (arg->type != STACK_VTYPE)
1970 klass = mono_class_from_mono_type (simple_type);
1971 if (klass != arg->klass)
1974 case MONO_TYPE_TYPEDBYREF:
1975 if (arg->type != STACK_VTYPE)
1977 klass = mono_class_from_mono_type (simple_type);
1978 if (klass != arg->klass)
1981 case MONO_TYPE_GENERICINST:
1982 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1983 if (arg->type != STACK_VTYPE)
1985 klass = mono_class_from_mono_type (simple_type);
1986 if (klass != arg->klass)
1990 if (arg->type != STACK_OBJ)
1992 /* FIXME: check type compatibility */
1996 case MONO_TYPE_MVAR:
1997 /* FIXME: all the arguments must be references for now,
1998 * later look inside cfg and see if the arg num is
1999 * really a reference
2001 g_assert (cfg->generic_sharing_context);
2002 if (arg->type != STACK_OBJ)
2006 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2012 * Prepare arguments for passing to a function call.
2013 * Return a non-zero value if the arguments can't be passed to the given
2015 * The type checks are not yet complete and some conversions may need
2016 * casts on 32 or 64 bit architectures.
2018 * FIXME: implement this using target_type_is_incompatible ()
 * (Early-return statements are elided in this listing.)
 */
2021 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2023 MonoType *simple_type;
/* 'this' (args [0]) must be an object, managed pointer or native pointer. */
2027 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2031 for (i = 0; i < sig->param_count; ++i) {
2032 if (sig->params [i]->byref) {
2033 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2037 simple_type = sig->params [i];
2038 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2040 switch (simple_type->type) {
2041 case MONO_TYPE_VOID:
2046 case MONO_TYPE_BOOLEAN:
2049 case MONO_TYPE_CHAR:
2052 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2058 case MONO_TYPE_FNPTR:
2059 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2062 case MONO_TYPE_CLASS:
2063 case MONO_TYPE_STRING:
2064 case MONO_TYPE_OBJECT:
2065 case MONO_TYPE_SZARRAY:
2066 case MONO_TYPE_ARRAY:
2067 if (args [i]->type != STACK_OBJ)
2072 if (args [i]->type != STACK_I8)
2077 if (args [i]->type != STACK_R8)
2080 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
2081 if (simple_type->data.klass->enumtype) {
2082 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2085 if (args [i]->type != STACK_VTYPE)
2088 case MONO_TYPE_TYPEDBYREF:
2089 if (args [i]->type != STACK_VTYPE)
2092 case MONO_TYPE_GENERICINST:
2093 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2097 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding direct OP_*CALL opcode;
 * asserts on anything else.  (The switch's case/return lines are largely
 * elided in this listing.)
 */
2105 callvirt_to_call (int opcode)
2110 case OP_VOIDCALLVIRT:
2119 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding OP_*CALL_MEMBASE opcode
 * (call through a vtable slot loaded from memory); asserts on anything else.
 */
2126 callvirt_to_call_membase (int opcode)
2130 return OP_CALL_MEMBASE;
2131 case OP_VOIDCALLVIRT:
2132 return OP_VOIDCALL_MEMBASE;
2134 return OP_FCALL_MEMBASE;
2136 return OP_LCALL_MEMBASE;
2138 return OP_VCALL_MEMBASE;
2140 g_assert_not_reached ();
2146 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Emit the hidden IMT (interface method table) argument for CALL: either
 * the runtime value IMT_ARG, or the method itself materialized as a constant
 * (AOT patch or direct pointer).  The LLVM path pins the value into
 * call->imt_arg_reg; the JIT path hands it to the architecture's IMT register
 * or falls back to mono_arch_emit_imt_argument.  Interior branch lines are
 * elided in this listing.
 */
2148 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2152 if (COMPILE_LLVM (cfg)) {
2153 method_reg = alloc_preg (cfg);
2156 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2157 } else if (cfg->compile_aot) {
2158 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2161 MONO_INST_NEW (cfg, ins, OP_PCONST);
2162 ins->inst_p0 = call->method;
2163 ins->dreg = method_reg;
2164 MONO_ADD_INS (cfg->cbb, ins);
2168 call->imt_arg_reg = method_reg;
2170 #ifdef MONO_ARCH_IMT_REG
2171 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2173 /* Need this to keep the IMT arg alive */
2174 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same materialization, then pass via the arch IMT register. */
2179 #ifdef MONO_ARCH_IMT_REG
2180 method_reg = alloc_preg (cfg);
2183 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2184 } else if (cfg->compile_aot) {
2185 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2188 MONO_INST_NEW (cfg, ins, OP_PCONST);
2189 ins->inst_p0 = call->method;
2190 ins->dreg = method_reg;
2191 MONO_ADD_INS (cfg->cbb, ins);
2194 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2196 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP whose
 * resolution target is TARGET.  The memory belongs to MP; no ownership is
 * transferred to the caller beyond the pool's lifetime.
 */
2201 static MonoJumpInfo *
2202 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2204 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2208 ji->data.target = target;
/*
 * mono_emit_call_args:
 *
 *   Build (but do not insert) a MonoCallInst for SIG/ARGS: pick the opcode
 * from the return type (or OP_TAILCALL for TAIL), wire up the valuetype
 * return buffer, convert R4 arguments under soft-float, and let the
 * arch/LLVM backend lower the argument passing.  CALLI/VIRTUAL/RGCTX select
 * indirect, virtual and rgctx-carrying variants.
 * NOTE(review): the two identical-looking MONO_TYPE_ISSTRUCT branches below
 * are separated by elided preprocessor conditionals in the original —
 * presumably selecting between vret_addr and OP_OUTARG_VTRETADDR handling;
 * confirm against the full file.
 */
2213 inline static MonoCallInst *
2214 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2215 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2218 #ifdef MONO_ARCH_SOFT_FLOAT
2223 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2225 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2228 call->signature = sig;
2229 call->rgctx_reg = rgctx;
2231 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2234 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2235 call->vret_var = cfg->vret_addr;
2236 //g_assert_not_reached ();
2238 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2239 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2242 temp->backend.is_pinvoke = sig->pinvoke;
2245 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2246 * address of return value to increase optimization opportunities.
2247 * Before vtype decomposition, the dreg of the call ins itself represents the
2248 * fact the call modifies the return value. After decomposition, the call will
2249 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2250 * will be transformed into an LDADDR.
2252 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2253 loada->dreg = alloc_preg (cfg);
2254 loada->inst_p0 = temp;
2255 /* We reference the call too since call->dreg could change during optimization */
2256 loada->inst_p1 = call;
2257 MONO_ADD_INS (cfg->cbb, loada);
2259 call->inst.dreg = temp->dreg;
2261 call->vret_var = loada;
2262 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2263 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2265 #ifdef MONO_ARCH_SOFT_FLOAT
2266 if (COMPILE_SOFT_FLOAT (cfg)) {
2268 * If the call has a float argument, we would need to do an r8->r4 conversion using
2269 * an icall, but that cannot be done during the call sequence since it would clobber
2270 * the call registers + the stack. So we do it before emitting the call.
2272 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2274 MonoInst *in = call->args [i];
2276 if (i >= sig->hasthis)
2277 t = sig->params [i - sig->hasthis];
2279 t = &mono_defaults.int_class->byval_arg;
2280 t = mono_type_get_underlying_type (t);
2282 if (!t->byref && t->type == MONO_TYPE_R4) {
2283 MonoInst *iargs [1];
2287 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2289 /* The result will be in an int vreg */
2290 call->args [i] = conv;
/* Hand argument lowering to LLVM or the native backend. */
2297 if (COMPILE_LLVM (cfg))
2298 mono_llvm_emit_call (cfg, call);
2300 mono_arch_emit_call (cfg, call);
2302 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-argument area needed by any call. */
2305 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2306 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx (runtime generic context) value in RGCTX_REG to CALL:
 * via the dedicated architecture register when one exists, otherwise by
 * recording the vreg in call->rgctx_arg_reg.
 */
2312 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2314 #ifdef MONO_ARCH_RGCTX_REG
2315 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2316 cfg->uses_rgctx_reg = TRUE;
2317 call->rgctx_reg = TRUE;
2319 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS,
 * optionally passing RGCTX_ARG as the runtime generic context.  Returns the
 * call instruction (usable as the result value).
 */
2326 inline static MonoInst*
2327 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
/* Copy the rgctx value out early; it must survive until after arg lowering. */
2333 rgctx_reg = mono_alloc_preg (cfg);
2334 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2337 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2339 call->inst.sreg1 = addr->dreg;
2341 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2344 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2346 return (MonoInst*)call;
/* Forward declarations for the rgctx lookup helpers used below. */
2350 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2352 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  THIS being
 * non-NULL makes the call virtual; IMT_ARG/RGCTX_ARG carry the optional
 * hidden interface-dispatch and generic-context arguments.  Handles remoting
 * proxies, delegate Invoke fast paths, devirtualization of final methods,
 * and IMT/vtable-slot dispatch.  Returns the call instruction.
 * NOTE(review): interior lines are elided in this listing; comments describe
 * only the visible code.
 */
2355 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2356 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2358 gboolean might_be_remote;
2359 gboolean virtual = this != NULL;
2360 gboolean enable_for_aot = TRUE;
/* Preserve the rgctx value across argument lowering. */
2366 rgctx_reg = mono_alloc_preg (cfg);
2367 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2370 if (method->string_ctor) {
2371 /* Create the real signature */
2372 /* FIXME: Cache these */
2373 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2374 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2379 context_used = mono_method_check_context_used (method);
/* Transparent-proxy candidates: MarshalByRef targets called non-virtually. */
2381 might_be_remote = this && sig->hasthis &&
2382 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2383 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2385 if (might_be_remote && context_used) {
2388 g_assert (cfg->generic_sharing_context);
/* Shared generic code: resolve the remoting wrapper through the rgctx. */
2390 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2392 return mono_emit_calli (cfg, sig, args, addr, NULL);
2395 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2397 if (might_be_remote)
2398 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2400 call->method = method;
2401 call->inst.flags |= MONO_INST_HAS_METHOD;
2402 call->inst.inst_left = this;
2405 int vtable_reg, slot_reg, this_reg;
2407 this_reg = this->dreg;
2409 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2410 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2411 MonoInst *dummy_use;
2413 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2415 /* Make a call to delegate->invoke_impl */
2416 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2417 call->inst.inst_basereg = this_reg;
2418 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2419 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2421 /* We must emit a dummy use here because the delegate trampoline will
2422 replace the 'this' argument with the delegate target making this activation
2423 no longer a root for the delegate.
2424 This is an issue for delegates that target collectible code such as dynamic
2425 methods of GC'able assemblies.
2427 For a test case look into #667921.
2429 FIXME: a dummy use is not the best way to do it as the local register allocator
2430 will put it on a caller save register and spil it around the call.
2431 Ideally, we would either put it on a callee save register or only do the store part.
2433 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2435 return (MonoInst*)call;
/* Devirtualization: non-virtual, or final and not a remoting wrapper. */
2439 if ((!cfg->compile_aot || enable_for_aot) &&
2440 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2441 (MONO_METHOD_IS_FINAL (method) &&
2442 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2443 !(method->klass->marshalbyref && context_used)) {
2445 * the method is not virtual, we just need to ensure this is not null
2446 * and then we can call the method directly.
2448 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2450 * The check above ensures method is not gshared, this is needed since
2451 * gshared methods can't have wrappers.
2453 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2456 if (!method->string_ctor)
2457 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2459 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2460 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2462 * the method is virtual, but we can statically dispatch since either
2463 * it's class or the method itself are sealed.
2464 * But first we need to ensure it's not a null reference.
2466 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2468 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable and call through a slot. */
2470 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2472 vtable_reg = alloc_preg (cfg);
2473 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2474 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2476 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via the IMT: negative slot offset from the vtable. */
2478 guint32 imt_slot = mono_method_get_imt_slot (method);
2479 emit_imt_argument (cfg, call, imt_arg);
2480 slot_reg = vtable_reg;
2481 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2484 if (slot_reg == -1) {
/* No IMT: go through the interface-offsets table instead. */
2485 slot_reg = alloc_preg (cfg);
2486 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2487 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Plain virtual call: positive slot offset inside the vtable. */
2490 slot_reg = vtable_reg;
2491 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2492 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2493 #ifdef MONO_ARCH_HAVE_IMT
2495 g_assert (mono_method_signature (method)->generic_param_count);
2496 emit_imt_argument (cfg, call, imt_arg);
2501 call->inst.sreg1 = slot_reg;
2502 call->virtual = TRUE;
2506 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2509 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2511 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a call to METHOD using the method's own
 * signature, with no IMT argument and no RGCTX argument.
 */
2515 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2517 	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native code at FUNC with signature SIG.
 * The call is built with all the optional call features (virtual dispatch,
 * tail call, rgctx, unbox trampoline) disabled.
 */
2521 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2528 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2531 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2533 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall whose C entry point is FUNC.  The icall
 * must have been registered beforehand so it can be found by address; the
 * actual call goes through the icall's wrapper, not to FUNC directly.
 */
2537 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2539 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2543 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2547 * mono_emit_abs_call:
2549 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2551 inline static MonoInst*
2552 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2553 MonoMethodSignature *sig, MonoInst **args)
2555 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2559 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2562 if (cfg->abs_patches == NULL)
2563 cfg->abs_patches = g_hash_table_new (NULL, NULL);
/* Register the patch info so the backend can recognize the fake address. */
2564 g_hash_table_insert (cfg->abs_patches, ji, ji);
2565 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call so later passes know fptr is a patch, not a real address. */
2566 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result INS of a call with signature FSIG to full register
 * width when the callee may leave the upper bits of a small integer result
 * undefined (pinvoke callees, or when compiling with LLVM enabled).
 * Returns the (possibly replaced) result instruction.
 */
2571 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2573 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2574 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2578 * Native code might return non register sized integers
2579 * without initializing the upper bits.
2581 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
/* Pick the sign/zero-extension conversion matching the return width. */
2582 case OP_LOADI1_MEMBASE:
2583 widen_op = OP_ICONV_TO_I1;
2585 case OP_LOADU1_MEMBASE:
2586 widen_op = OP_ICONV_TO_U1;
2588 case OP_LOADI2_MEMBASE:
2589 widen_op = OP_ICONV_TO_I2;
2591 case OP_LOADU2_MEMBASE:
2592 widen_op = OP_ICONV_TO_U2;
2598 if (widen_op != -1) {
2599 int dreg = alloc_preg (cfg);
2602 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* The widened value keeps the stack type of the original result. */
2603 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy helper from corlib, caching the
 * lookup in a static.  Aborts if the helper is missing (old corlib).
 * NOTE(review): the helper intentionally lives on the String class in
 * Mono's corlib, not on a "memcpy" class.
 */
2613 get_memcpy_method (void)
2615 static MonoMethod *memcpy_method = NULL;
2616 if (!memcpy_method) {
2617 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2619 g_error ("Old corlib found. Install a new one");
2621 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Fill *WB_BITMAP with one bit per pointer-sized slot of KLASS (starting
 * at byte OFFSET) that holds a reference and therefore needs a write
 * barrier.  Recurses into embedded value types that contain references.
 */
2625 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2627 MonoClassField *field;
2628 gpointer iter = NULL;
2630 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields are not part of the instance layout. */
2633 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; strip it. */
2635 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2636 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer aligned for the bitmap to work. */
2637 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2638 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2640 MonoClass *field_class = mono_class_from_mono_type (field->type);
2641 if (field_class->has_references)
2642 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or, when VALUE is NULL,
 * of the register VALUE_REG) through the address in PTR.  Picks, in order
 * of preference: the arch-specific card-table barrier opcode, inline
 * card-table marking code, or a call to the generic GC write-barrier
 * method.  No-op when write barriers are disabled for this compile.
 */
2648 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2650 int card_table_shift_bits;
2651 gpointer card_table_mask;
2653 MonoInst *dummy_use;
2654 int nursery_shift_bits;
2655 size_t nursery_size;
2656 gboolean has_card_table_wb = FALSE;
2658 if (!cfg->gen_write_barriers)
2661 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2663 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2665 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2666 has_card_table_wb = TRUE;
/* Fast path: a single arch-level instruction marks the card. */
2669 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2672 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2673 wbarrier->sreg1 = ptr->dreg;
2675 wbarrier->sreg2 = value->dreg;
2677 wbarrier->sreg2 = value_reg;
2678 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card_table [(ptr >> shift) & mask] = 1. */
2679 } else if (card_table) {
2680 int offset_reg = alloc_preg (cfg);
2681 int card_reg = alloc_preg (cfg);
2684 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2685 if (card_table_mask)
2686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2688 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2689 * IMM's larger than 32bits.
2691 if (cfg->compile_aot) {
2692 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2694 MONO_INST_NEW (cfg, ins, OP_PCONST);
2695 ins->inst_p0 = card_table;
2696 ins->dreg = card_reg;
2697 MONO_ADD_INS (cfg->cbb, ins);
2700 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2701 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided managed write barrier. */
2703 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2704 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive across the barrier for liveness purposes. */
2708 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2710 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2711 dummy_use->sreg1 = value_reg;
2712 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a copy of a value type KLASS (dest/src addresses in IARGS[0..1],
 * SIZE bytes, ALIGN alignment) that also performs the required GC write
 * barriers for any reference fields.  Small copies are unrolled inline
 * with a per-slot barrier; larger ones call the
 * mono_gc_wbarrier_value_copy_bitmap icall with a reference bitmap.
 * Returns FALSE (presumably; elided here — TODO confirm) when the copy
 * cannot be handled and the caller must fall back to another strategy.
 */
2717 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2719 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2720 unsigned need_wb = 0;
2725 /*types with references can't have alignment smaller than sizeof(void*) */
2726 if (align < SIZEOF_VOID_P)
2729 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2730 if (size > 32 * SIZEOF_VOID_P)
2733 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2735 /* We don't unroll more than 5 stores to avoid code bloat. */
2736 if (size > 5 * SIZEOF_VOID_P) {
2737 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size. */
2738 size += (SIZEOF_VOID_P - 1);
2739 size &= ~(SIZEOF_VOID_P - 1);
2741 EMIT_NEW_ICONST (cfg, iargs [2], size);
2742 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2743 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2747 destreg = iargs [0]->dreg;
2748 srcreg = iargs [1]->dreg;
2751 dest_ptr_reg = alloc_preg (cfg);
2752 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one slot per iteration. */
2755 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2757 while (size >= SIZEOF_VOID_P) {
2758 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier for this slot (presumably only when its bit is set in need_wb;
 * the guarding condition is elided from this view — confirm upstream). */
2762 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2764 offset += SIZEOF_VOID_P;
2765 size -= SIZEOF_VOID_P;
2768 /*tmp += sizeof (void*)*/
2769 if (size >= SIZEOF_VOID_P) {
2770 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2771 MONO_ADD_INS (cfg->cbb, iargs [0]);
2775 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail in 4-, 2- and 1-byte chunks. */
2777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2791 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2792 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2801 * Emit code to copy a valuetype of type @klass whose address is stored in
2802 * @src->dreg to memory whose address is stored at @dest->dreg.
2805 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2807 MonoInst *iargs [4];
2810 MonoMethod *memcpy_method;
2814 * This check breaks with spilled vars... need to handle it during verification anyway.
2815 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size differs between native (marshalled) and managed layout. */
2819 n = mono_class_native_size (klass, &align);
2821 n = mono_class_value_size (klass, &align);
2823 /* if native is true there should be no references in the struct */
2824 if (cfg->gen_write_barriers && klass->has_references && !native) {
2825 /* Avoid barriers when storing to the stack */
2826 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2827 (dest->opcode == OP_LDADDR))) {
2828 int context_used = 0;
2833 if (cfg->generic_sharing_context)
2834 context_used = mono_class_check_context_used (klass);
2836 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2837 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2839 } else if (context_used) {
/* Shared code: fetch the klass argument from the rgctx at run time. */
2840 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2842 if (cfg->compile_aot) {
2843 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2845 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy needs the GC descriptor to be computed. */
2846 mono_class_compute_gc_descriptor (klass);
2850 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or native layout): plain memcpy is enough. */
2855 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2856 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2857 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2861 EMIT_NEW_ICONST (cfg, iargs [2], n);
2863 memcpy_method = get_memcpy_method ();
2864 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset helper from corlib, caching the
 * lookup in a static.  Aborts if the helper is missing (old corlib).
 */
2869 get_memset_method (void)
2871 static MonoMethod *memset_method = NULL;
2872 if (!memset_method) {
2873 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2875 g_error ("Old corlib found. Install a new one");
2877 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zero-initializing the valuetype KLASS at the address in
 * DEST->dreg (CIL initobj).  Small types are zeroed inline; larger ones
 * call the managed memset helper.
 */
2881 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2883 MonoInst *iargs [3];
2886 MonoMethod *memset_method;
2888 /* FIXME: Optimize this for the case when dest is an LDADDR */
2890 mono_class_init (klass);
2891 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized words: inline stores instead of a call. */
2893 if (n <= sizeof (gpointer) * 5) {
2894 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2897 memset_method = get_memset_method ();
2899 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2900 EMIT_NEW_ICONST (cfg, iargs [2], n);
2901 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context for METHOD into a
 * register, according to how the context is shared (CONTEXT_USED):
 * from the mrgctx argument, from the vtable argument, or from `this`'s
 * vtable.  Only valid under generic sharing.
 */
2906 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2908 MonoInst *this = NULL;
2910 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype methods without a method context can reach
 * the context through `this`. */
2912 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2913 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2914 !method->klass->valuetype)
2915 EMIT_NEW_ARGLOAD (cfg, this, 0);
2917 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2918 MonoInst *mrgctx_loc, *mrgctx_var;
2921 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method context used: the mrgctx is passed in a hidden argument
 * spilled to a local. */
2923 mrgctx_loc = mono_get_vtable_var (cfg);
2924 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2927 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2928 MonoInst *vtable_loc, *vtable_var;
2932 vtable_loc = mono_get_vtable_var (cfg);
2933 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2935 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The hidden argument is actually an mrgctx; load its vtable field. */
2936 MonoInst *mrgctx_var = vtable_var;
2939 vtable_reg = alloc_preg (cfg);
2940 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2941 vtable_var->type = STACK_PTR;
/* Default case: load the vtable out of `this`. */
2949 vtable_reg = alloc_preg (cfg);
2950 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from the mempool MP) an rgctx-entry patch descriptor for an
 * rgctx fetch: which METHOD it belongs to, whether the lookup starts from
 * an mrgctx, the patch describing the looked-up item (PATCH_TYPE +
 * PATCH_DATA), and the kind of information requested (INFO_TYPE).
 */
2955 static MonoJumpInfoRgctxEntry *
2956 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2958 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2959 res->method = method;
2960 res->in_mrgctx = in_mrgctx;
2961 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2962 res->data->type = patch_type;
2963 res->data->data.target = patch_data;
2964 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline which resolves ENTRY
 * using the runtime generic context in RGCTX.
 */
2969 static inline MonoInst*
2970 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2972 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR fetching the RGCTX_TYPE information (vtable, klass, cast
 * cache, ...) for KLASS from the runtime generic context of the current
 * method.
 */
2976 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2977 MonoClass *klass, MonoRgctxInfoType rgctx_type)
2979 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2980 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2982 return emit_rgctx_fetch (cfg, rgctx, entry);
2986 * emit_get_rgctx_method:
2988 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2989 * normal constants, else emit a load from the rgctx.
2992 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2993 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2995 if (!context_used) {
/* Non-shared code: the method is fully known, emit a constant. */
2998 switch (rgctx_type) {
2999 case MONO_RGCTX_INFO_METHOD:
3000 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3002 case MONO_RGCTX_INFO_METHOD_RGCTX:
3003 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3006 g_assert_not_reached ();
/* Shared code: resolve via a lazy rgctx fetch at run time. */
3009 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3010 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3012 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR fetching the RGCTX_TYPE information for FIELD from the
 * runtime generic context of the current method.
 */
3017 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3018 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3020 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3021 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3023 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing
 * the class's vtable (fetched from the rgctx under sharing, otherwise a
 * vtable constant).
 */
3027 * On return the caller must check @klass for load errors.
3030 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3032 MonoInst *vtable_arg;
3034 int context_used = 0;
3036 if (cfg->generic_sharing_context)
3037 context_used = mono_class_check_context_used (klass);
3040 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3041 klass, MONO_RGCTX_INFO_VTABLE);
3043 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3047 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a variant trampoline signature. */
3050 if (COMPILE_LLVM (cfg))
3051 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3053 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3054 #ifdef MONO_ARCH_VTABLE_REG
/* On arches with a dedicated vtable register, pass the vtable there. */
3055 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3056 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit code recording the source class
 * (read from OBJ_REG's vtable) and the target KLASS into the JIT TLS
 * area, so a failing cast can produce a detailed error message.
 */
3063 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3065 if (mini_get_debug_options ()->better_cast_details) {
3066 int to_klass_reg = alloc_preg (cfg);
3067 int vtable_reg = alloc_preg (cfg);
3068 int klass_reg = alloc_preg (cfg);
3069 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature cannot work. */
3072 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3076 MONO_ADD_INS (cfg->cbb, tls_get);
3077 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3078 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3080 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3081 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3082 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details: clear the recorded cast info after
 * a cast succeeds, so stale data is not reported on a later failure.
 */
3087 reset_cast_details (MonoCompile *cfg)
3089 /* Reset the variables holding the cast details */
3090 if (mini_get_debug_options ()->better_cast_details) {
3091 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3093 MONO_ADD_INS (cfg->cbb, tls_get);
3094 /* It is enough to reset the from field */
3095 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit a type check used by array stores (stelem): verify that OBJ is
 * exactly an instance of ARRAY_CLASS, throwing ArrayTypeMismatchException
 * otherwise.  The comparison strategy depends on the compile mode:
 * class pointers under MONO_OPT_SHARED, an rgctx-fetched vtable under
 * generic sharing, otherwise a vtable constant.
 */
3100 * On return the caller must check @array_class for load errors
3103 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3105 int vtable_reg = alloc_preg (cfg);
3106 int context_used = 0;
3108 if (cfg->generic_sharing_context)
3109 context_used = mono_class_check_context_used (array_class);
3111 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also performs the null check on OBJ. */
3113 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3115 if (cfg->opt & MONO_OPT_SHARED) {
3116 int class_reg = alloc_preg (cfg);
3117 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3118 if (cfg->compile_aot) {
3119 int klass_reg = alloc_preg (cfg);
3120 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3121 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3123 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3125 } else if (context_used) {
3126 MonoInst *vtable_ins;
3128 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3129 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3131 if (cfg->compile_aot) {
/* NOTE(review): bail-out path when the vtable can't be created is
 * elided from this view — confirm error handling upstream. */
3135 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3137 vt_reg = alloc_preg (cfg);
3138 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3139 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3142 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3144 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3148 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3150 reset_cast_details (cfg);
3154 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3155 * generic code is generated.
3158 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unboxing is delegated to the managed Nullable<T>.Unbox helper. */
3160 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3163 MonoInst *rgctx, *addr;
3165 /* FIXME: What if the class is shared? We might not
3166 have to get the address of the method from the
/* Shared code: resolve the helper's code address through the rgctx and
 * make an indirect call. */
3168 addr = emit_get_rgctx_method (cfg, context_used, method,
3169 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3171 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3173 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3175 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR for CIL `unbox`: verify that the object on top of the stack
 * (SP[0]) is a boxed instance of KLASS (InvalidCastException otherwise)
 * and produce a managed pointer to its payload, i.e. the object address
 * plus sizeof (MonoObject).
 */
3180 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3184 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3185 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3186 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3187 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3189 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3190 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3191 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3193 /* FIXME: generics */
3194 g_assert (klass->rank == 0);
/* An array can never unbox to a valuetype: require rank == 0. */
3197 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3198 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3200 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3201 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3204 MonoInst *element_class;
3206 /* This assertion is from the unboxcast insn */
3207 g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx. */
3209 element_class = emit_get_rgctx_klass (cfg, context_used,
3210 klass->element_class, MONO_RGCTX_INFO_KLASS);
3212 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3213 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3215 save_cast_details (cfg, klass->element_class, obj_reg);
3216 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3217 reset_cast_details (cfg);
/* Result: pointer past the object header, i.e. to the boxed payload. */
3220 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3221 MONO_ADD_INS (cfg->cbb, add);
3222 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new object of KLASS (optionally FOR_BOX, i.e. as
 * the first step of a box operation).  Chooses between the shared-domain
 * allocator, an mscorlib-specialized AOT helper, a GC-provided managed
 * allocator, or the generic allocation function.
 */
3229 * Returns NULL and set the cfg exception on error.
3232 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3234 MonoInst *iargs [2];
/* NOTE(review): this second iargs declaration presumably shadows the
 * outer one inside the context_used branch (braces elided in this view). */
3240 MonoInst *iargs [2];
3243 FIXME: we cannot get managed_alloc here because we can't get
3244 the class's vtable (because it's not a closed class)
3246 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3247 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared code: fetch klass (shared domain) or vtable from the rgctx. */
3250 if (cfg->opt & MONO_OPT_SHARED)
3251 rgctx_info = MONO_RGCTX_INFO_KLASS;
3253 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3254 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3256 if (cfg->opt & MONO_OPT_SHARED) {
3257 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3259 alloc_ftn = mono_object_new;
3262 alloc_ftn = mono_object_new_specific;
3265 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared code paths below. */
3268 if (cfg->opt & MONO_OPT_SHARED) {
3269 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3270 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3272 alloc_ftn = mono_object_new;
3273 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3274 /* This happens often in argument checking code, eg. throw new FooException... */
3275 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3276 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3277 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3279 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3280 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a type-load error on the cfg. */
3284 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3285 cfg->exception_ptr = klass;
3289 #ifndef MONO_CROSS_COMPILE
3290 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3293 if (managed_alloc) {
3294 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3295 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3297 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as first argument. */
3299 guint32 lw = vtable->klass->instance_size;
3300 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3301 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3302 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3305 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3309 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR for CIL `box`: allocate a boxed instance of KLASS and store
 * VAL into its payload.  Nullable<T> is boxed via the managed
 * Nullable<T>.Box helper instead.
 */
3313 * Returns NULL and set the cfg exception on error.
3316 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3318 MonoInst *alloc, *ins;
3320 if (mono_class_is_nullable (klass)) {
3321 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3324 /* FIXME: What if the class is shared? We might not
3325 have to get the method address from the RGCTX. */
3326 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3327 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3328 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3330 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3332 return mono_emit_method_call (cfg, method, &val, NULL);
3336 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value right after the object header. */
3340 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS has at least one variant (co- or contra-variant)
 * generic argument instantiated with a reference type.  Such casts cannot
 * be decided by a simple class-pointer comparison and need the
 * variance-aware cast helpers.
 */
3347 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3350 MonoGenericContainer *container;
3351 MonoGenericInst *ginst;
3353 if (klass->generic_class) {
3354 container = klass->generic_class->container_class->generic_container;
3355 ginst = klass->generic_class->context.class_inst;
3356 } else if (klass->generic_container && context_used) {
/* Open generic type under sharing: inspect the container's own params. */
3357 container = klass->generic_container;
3358 ginst = container->context.class_inst;
3363 for (i = 0; i < container->type_argc; ++i) {
/* Only variant parameters matter; invariant ones are exact matches. */
3365 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3367 type = ginst->type_argv [i];
3368 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 *
 *   Decide whether a castclass/isinst against KLASS needs the icall slow
 * path (interfaces, arrays, nullables, remoting proxies, sealed classes,
 * open type variables).  The leading `TRUE ||` currently forces EVERY
 * cast down the complex path — see the FIXME below.
 */
3374 // FIXME: This doesn't work yet (class libs tests fail?)
3375 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 *   Emit IR for CIL `castclass`: check that SRC is null or an instance of
 * KLASS, throwing InvalidCastException otherwise, and return SRC.
 * Variant generic casts use the caching managed helper; complex casts
 * (see is_complex_isinst) use the mono_object_castclass icall; the rest
 * are checked inline against the object's vtable/class.
 */
3378 * Returns NULL and set the cfg exception on error.
3381 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3383 MonoBasicBlock *is_null_bb;
3384 int obj_reg = src->dreg;
3385 int vtable_reg = alloc_preg (cfg);
3386 MonoInst *klass_inst = NULL;
3391 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
/* Variance involved: use the caching managed castclass helper. */
3392 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3393 MonoInst *cache_ins;
3395 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3400 /* klass - it's the second element of the cache entry*/
3401 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3404 args [2] = cache_ins;
3406 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3409 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3411 if (is_complex_isinst (klass)) {
3412 /* Complex case, handle by an icall */
3418 args [1] = klass_inst;
3420 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3422 /* Simple case, handled by the code below */
3426 NEW_BBLOCK (cfg, is_null_bb);
/* null always passes castclass: branch straight to the exit. */
3428 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3429 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3431 save_cast_details (cfg, klass, obj_reg);
3433 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3435 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3437 int klass_reg = alloc_preg (cfg);
3439 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single equality check suffices. */
3441 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3442 /* the remoting code is broken, access the class for now */
3443 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3444 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3446 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3447 cfg->exception_ptr = klass;
3450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3453 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3455 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3458 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3462 MONO_START_BB (cfg, is_null_bb);
3464 reset_cast_details (cfg);
3470 * Returns NULL and set the cfg exception on error.
3473 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3476 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3477 int obj_reg = src->dreg;
3478 int vtable_reg = alloc_preg (cfg);
3479 int res_reg = alloc_ireg_ref (cfg);
3480 MonoInst *klass_inst = NULL;
3485 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3486 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3487 MonoInst *cache_ins;
3489 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3494 /* klass - it's the second element of the cache entry*/
3495 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3498 args [2] = cache_ins;
3500 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3503 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3505 if (is_complex_isinst (klass)) {
3506 /* Complex case, handle by an icall */
3512 args [1] = klass_inst;
3514 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3516 /* Simple case, the code below can handle it */
3520 NEW_BBLOCK (cfg, is_null_bb);
3521 NEW_BBLOCK (cfg, false_bb);
3522 NEW_BBLOCK (cfg, end_bb);
3524 /* Do the assignment at the beginning, so the other assignment can be if converted */
3525 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3526 ins->type = STACK_OBJ;
3529 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3530 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3532 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3534 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3535 g_assert (!context_used);
3536 /* the is_null_bb target simply copies the input register to the output */
3537 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3539 int klass_reg = alloc_preg (cfg);
3542 int rank_reg = alloc_preg (cfg);
3543 int eclass_reg = alloc_preg (cfg);
3545 g_assert (!context_used);
3546 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3551 if (klass->cast_class == mono_defaults.object_class) {
3552 int parent_reg = alloc_preg (cfg);
3553 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3554 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3555 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3557 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3558 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3559 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3561 } else if (klass->cast_class == mono_defaults.enum_class) {
3562 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3563 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3564 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3565 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3567 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3568 /* Check that the object is a vector too */
3569 int bounds_reg = alloc_preg (cfg);
3570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3572 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3575 /* the is_null_bb target simply copies the input register to the output */
3576 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3578 } else if (mono_class_is_nullable (klass)) {
3579 g_assert (!context_used);
3580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3581 /* the is_null_bb target simply copies the input register to the output */
3582 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3584 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3585 g_assert (!context_used);
3586 /* the remoting code is broken, access the class for now */
3587 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3588 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3590 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3591 cfg->exception_ptr = klass;
3594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3600 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3603 /* the is_null_bb target simply copies the input register to the output */
3604 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3609 MONO_START_BB (cfg, false_bb);
3611 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3612 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3614 MONO_START_BB (cfg, is_null_bb);
3616 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the JIT-internal CISINST opcode: given an object reference
 * SRC and a class KLASS, produce an I4 result on the stack:
 *   0 - the object is an instance of KLASS,
 *   1 - the object is not an instance of KLASS,
 *   2 - the object is a transparent proxy whose type cannot be determined.
 * NOTE(review): some original lines are elided in this view of the file.
 */
3622 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3624 /* This opcode takes as input an object reference and a class, and returns:
3625 0) if the object is an instance of the class,
3626 1) if the object is not instance of the class,
3627 2) if the object is a proxy whose type cannot be determined */
3630 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3631 int obj_reg = src->dreg;
3632 int dreg = alloc_ireg (cfg);
3634 int klass_reg = alloc_preg (cfg);
3636 NEW_BBLOCK (cfg, true_bb);
3637 NEW_BBLOCK (cfg, false_bb);
3638 NEW_BBLOCK (cfg, false2_bb);
3639 NEW_BBLOCK (cfg, end_bb);
3640 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is not an instance of anything -> result 1 */
3642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3643 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: fast interface check first; on failure only a
 * transparent proxy can still produce result 2. */
3645 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3646 NEW_BBLOCK (cfg, interface_fail_bb);
3648 tmp_reg = alloc_preg (cfg);
3649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3650 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3651 MONO_START_BB (cfg, interface_fail_bb);
3652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3654 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info: its type cannot be determined -> 2 */
3656 tmp_reg = alloc_preg (cfg);
3657 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable */
3661 tmp_reg = alloc_preg (cfg);
3662 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3663 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* For a transparent proxy, test the remote (proxied) class instead */
3665 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3666 tmp_reg = alloc_preg (cfg);
3667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3668 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* A proxy without custom type info is handled like an ordinary object */
3670 tmp_reg = alloc_preg (cfg);
3671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3672 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3673 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3675 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3676 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3678 MONO_START_BB (cfg, no_proxy_bb);
3680 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* false_bb: not an instance -> 1 */
3683 MONO_START_BB (cfg, false_bb);
3685 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3686 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* false2_bb: proxy whose type cannot be determined -> 2 */
3688 MONO_START_BB (cfg, false2_bb);
3690 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb: the object is an instance -> 0 */
3693 MONO_START_BB (cfg, true_bb);
3695 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3697 MONO_START_BB (cfg, end_bb);
/* Materialize the merged result as an I4 value */
3700 MONO_INST_NEW (cfg, ins, OP_ICONST);
3702 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the JIT-internal CCASTCLASS opcode: given an object
 * reference SRC and a class KLASS, produce an I4 result:
 *   0 - the object is an instance of KLASS (a null reference also yields 0),
 *   1 - the object is a transparent proxy whose type cannot be determined;
 * otherwise an InvalidCastException is thrown at runtime.
 * NOTE(review): some original lines are elided in this view of the file.
 */
3708 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3710 /* This opcode takes as input an object reference and a class, and returns:
3711 0) if the object is an instance of the class,
3712 1) if the object is a proxy whose type cannot be determined
3713 an InvalidCastException exception is thrown otherwise*/
3716 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3717 int obj_reg = src->dreg;
3718 int dreg = alloc_ireg (cfg);
3719 int tmp_reg = alloc_preg (cfg);
3720 int klass_reg = alloc_preg (cfg);
3722 NEW_BBLOCK (cfg, end_bb);
3723 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds -> 0 */
3725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3726 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details for the exception message on failure */
3728 save_cast_details (cfg, klass, obj_reg);
/* Interface case: fast interface check; on failure only a transparent
 * proxy with no custom type info may return 1 instead of throwing. */
3730 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3731 NEW_BBLOCK (cfg, interface_fail_bb);
3733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3734 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3735 MONO_START_BB (cfg, interface_fail_bb);
3736 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy objects that failed the interface check throw here */
3738 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3740 tmp_reg = alloc_preg (cfg);
3741 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3742 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3743 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy whose type cannot be determined -> 1 */
3745 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3746 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case */
3749 NEW_BBLOCK (cfg, no_proxy_bb);
3751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3753 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a transparent proxy, test the remote (proxied) class instead */
3755 tmp_reg = alloc_preg (cfg);
3756 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3759 tmp_reg = alloc_preg (cfg);
3760 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3762 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3764 NEW_BBLOCK (cfg, fail_1_bb);
3766 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy whose type cannot be determined -> 1 */
3768 MONO_START_BB (cfg, fail_1_bb);
3770 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3771 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ordinary object: a failed cast throws inside mini_emit_castclass () */
3773 MONO_START_BB (cfg, no_proxy_bb);
3775 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
/* ok_result_bb: cast succeeded -> 0 */
3778 MONO_START_BB (cfg, ok_result_bb);
3780 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3782 MONO_START_BB (cfg, end_bb);
/* Materialize the merged result as an I4 value */
3785 MONO_INST_NEW (cfg, ins, OP_ICONST);
3787 ins->type = STACK_I4;
3793 * Returns NULL and set the cfg exception on error.
3795 static G_GNUC_UNUSED MonoInst*
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object,
 * fill in its target, method, (optionally) method_code and invoke_impl
 * fields, and return the new object instruction.  CONTEXT_USED selects
 * RGCTX-based lookup of the method.  The enclosing comment notes this
 * returns NULL and sets the cfg exception on error.
 */
3796 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3800 gpointer *trampoline;
3801 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate instance itself */
3805 obj = handle_alloc (cfg, klass, FALSE, 0);
3809 /* Inline the contents of mono_delegate_ctor */
3811 /* Set target field */
3812 /* Optimize away setting of NULL target */
3813 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference store into a heap object: notify the GC if needed */
3815 if (cfg->gen_write_barriers) {
3816 dreg = alloc_preg (cfg);
3817 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3818 emit_write_barrier (cfg, ptr, target, 0);
3822 /* Set method field */
3823 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3824 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3825 if (cfg->gen_write_barriers) {
3826 dreg = alloc_preg (cfg);
3827 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3828 emit_write_barrier (cfg, ptr, method_ins, 0);
3831 * To avoid looking up the compiled code belonging to the target method
3832 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3833 * store it, and we fill it after the method has been compiled.
/* The slot is cached per-domain in method_code_hash, keyed by method;
 * the hash table and slot are created lazily under the domain lock. */
3835 if (!cfg->compile_aot && !method->dynamic) {
3836 MonoInst *code_slot_ins;
3839 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3841 domain = mono_domain_get ();
3842 mono_domain_lock (domain);
3843 if (!domain_jit_info (domain)->method_code_hash)
3844 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3845 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3847 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3848 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3850 mono_domain_unlock (domain);
3852 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3854 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3857 /* Set invoke_impl field */
/* Under AOT the trampoline address is a patch-time constant */
3858 if (cfg->compile_aot) {
3859 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3861 trampoline = mono_create_delegate_trampoline (klass);
3862 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3866 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the rank-specific mono_array_new_va icall wrapper
 * (used for multi-dimensional array construction).  SP holds the call
 * arguments; the vararg calling convention forces MONO_CFG_HAS_VARARGS and
 * disables LLVM compilation for the method.
 */
3872 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3874 MonoJitICallInfo *info;
3876 /* Need to register the icall so it gets an icall wrapper */
3877 info = mono_get_array_new_va_icall (rank);
3879 cfg->flags |= MONO_CFG_HAS_VARARGS;
3881 /* mono_array_new_va () needs a vararg calling convention */
3882 cfg->disable_llvm = TRUE;
3884 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3885 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method uses a GOT variable, emit an OP_LOAD_GOTADDR at the very
 * start of the entry basic block to initialize it, and add a dummy use in
 * the exit block so the variable stays alive for the whole method.  Does
 * nothing when there is no got_var or it was already allocated.
 */
3889 mono_emit_load_got_addr (MonoCompile *cfg)
3891 MonoInst *getaddr, *dummy_use;
3893 if (!cfg->got_var || cfg->got_var_allocated)
3896 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3897 getaddr->dreg = cfg->got_var->dreg;
3899 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code */
3900 if (cfg->bb_entry->code) {
3901 getaddr->next = cfg->bb_entry->code;
3902 cfg->bb_entry->code = getaddr;
3905 MONO_ADD_INS (cfg->bb_entry, getaddr);
3907 cfg->got_var_allocated = TRUE;
3910 * Add a dummy use to keep the got_var alive, since real uses might
3911 * only be generated by the back ends.
3912 * Add it to end_bblock, so the variable's lifetime covers the whole
3914 * It would be better to make the usage of the got var explicit in all
3915 * cases when the backend needs it (i.e. calls, throw etc.), so this
3916 * wouldn't be needed.
3918 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3919 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline body-size limit, initialized lazily from the MONO_INLINELIMIT
 * environment variable (defaulting to INLINE_LENGTH_LIMIT) the first time
 * mono_method_check_inlining () runs. */
3922 static int inline_limit;
3923 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG.  Rejects: generic sharing contexts, inline nesting
 * deeper than 10, noinline/synchronized methods, MarshalByRef classes,
 * bodies at or above the MONO_INLINELIMIT size (default
 * INLINE_LENGTH_LIMIT), classes whose class constructor would have to run
 * inside the inlined code, methods with declarative security, and (on
 * soft-float targets) methods with R4 parameters or return values.
 * NOTE(review): some original lines (returns, braces) are elided in this
 * view of the file.
 */
3926 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3928 MonoMethodHeaderSummary header;
3930 #ifdef MONO_ARCH_SOFT_FLOAT
3931 MonoMethodSignature *sig = mono_method_signature (method);
3935 if (cfg->generic_sharing_context)
3938 if (cfg->inline_depth > 10)
3941 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* Targets with LMF ops can inline certain internal calls / pinvokes */
3942 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3943 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3944 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3949 if (!mono_method_get_header_summary (method, &header))
3952 /*runtime, icall and pinvoke are checked by summary call*/
3953 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3954 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3955 (method->klass->marshalbyref) ||
3959 /* also consider num_locals? */
3960 /* Do the size check early to avoid creating vtables */
3961 if (!inline_limit_inited) {
3962 if (getenv ("MONO_INLINELIMIT"))
3963 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3965 inline_limit = INLINE_LENGTH_LIMIT;
3966 inline_limit_inited = TRUE;
3968 if (header.code_size >= inline_limit)
3972 * if we can initialize the class of the method right away, we do,
3973 * otherwise we don't allow inlining if the class needs initialization,
3974 * since it would mean inserting a call to mono_runtime_class_init()
3975 * inside the inlined code
3977 if (!(cfg->opt & MONO_OPT_SHARED)) {
3978 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3979 if (cfg->run_cctors && method->klass->has_cctor) {
3980 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3981 if (!method->klass->runtime_info)
3982 /* No vtable created yet */
3984 vtable = mono_class_vtable (cfg->domain, method->klass);
3987 /* This makes so that inline cannot trigger */
3988 /* .cctors: too many apps depend on them */
3989 /* running with a specific order... */
3990 if (! vtable->initialized)
3992 mono_runtime_class_init (vtable);
3994 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3995 if (!method->klass->runtime_info)
3996 /* No vtable created yet */
3998 vtable = mono_class_vtable (cfg->domain, method->klass);
4001 if (!vtable->initialized)
4006 * If we're compiling for shared code
4007 * the cctor will need to be run at aot method load time, for example,
4008 * or at the end of the compilation of the inlining method.
4010 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4015 * CAS - do not inline methods with declarative security
4016 * Note: this has to be before any possible return TRUE;
4018 if (mono_method_has_declsec (method))
4021 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: reject R4 in the return type or any parameter */
4023 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4025 for (i = 0; i < sig->param_count; ++i)
4026 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access in METHOD requires running the
 * class constructor of VTABLE's class first.  Each test below filters out
 * a case where no cctor call is needed (already initialized and not AOT,
 * BeforeFieldInit semantics, no cctor to run, or an instance method of the
 * same class whose cctor already ran at call time).  The return statements
 * are elided in this view of the file.
 */
4034 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4036 if (vtable->initialized && !cfg->compile_aot)
4039 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4042 if (!mono_class_needs_cctor_run (vtable->klass, method))
4045 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4046 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the element-address computation for a one-dimensional (szarray)
 * array access: an optional bounds check (BCHECK), then
 *   array_reg + index * element_size + offsetof (MonoArray, vector).
 * On 64-bit targets the 32-bit index is sign-extended first (except under
 * LLVM, which handles it itself); on x86/amd64 a single LEA is emitted for
 * power-of-two element sizes.  The result instruction has type STACK_MP.
 */
4053 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4057 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4059 mono_class_init (klass);
4060 size = mono_class_array_element_size (klass);
4062 mult_reg = alloc_preg (cfg);
4063 array_reg = arr->dreg;
4064 index_reg = index->dreg;
4066 #if SIZEOF_REGISTER == 8
4067 /* The array reg is 64 bits but the index reg is only 32 */
4068 if (COMPILE_LLVM (cfg)) {
4070 index2_reg = index_reg;
4072 index2_reg = alloc_preg (cfg);
4073 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to I4 */
4076 if (index->type == STACK_I8) {
4077 index2_reg = alloc_preg (cfg);
4078 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4080 index2_reg = index_reg;
4085 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4087 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold scale and offset into one LEA */
4088 if (size == 1 || size == 2 || size == 4 || size == 8) {
4089 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4091 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4092 ins->klass = mono_class_get_element_class (klass);
4093 ins->type = STACK_MP;
/* Generic path: explicit multiply + add + vector offset */
4099 add_reg = alloc_ireg_mp (cfg);
4101 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4102 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4103 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4104 ins->klass = mono_class_get_element_class (klass);
4105 ins->type = STACK_MP;
4106 MONO_ADD_INS (cfg->cbb, ins);
4111 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the element-address computation for a two-dimensional array
 * access: range-check both indexes against the per-dimension bounds
 * (lower_bound/length), then compute
 *   &arr->vector [((idx1 - low1) * len2 + (idx2 - low2)) * element_size].
 * Only compiled on targets with native mul/div (guarded by the #ifndef
 * above), since it relies on the pointer-multiply opcodes.
 */
4113 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4115 int bounds_reg = alloc_preg (cfg);
4116 int add_reg = alloc_ireg_mp (cfg);
4117 int mult_reg = alloc_preg (cfg);
4118 int mult2_reg = alloc_preg (cfg);
4119 int low1_reg = alloc_preg (cfg);
4120 int low2_reg = alloc_preg (cfg);
4121 int high1_reg = alloc_preg (cfg);
4122 int high2_reg = alloc_preg (cfg);
4123 int realidx1_reg = alloc_preg (cfg);
4124 int realidx2_reg = alloc_preg (cfg);
4125 int sum_reg = alloc_preg (cfg);
4130 mono_class_init (klass);
4131 size = mono_class_array_element_size (klass);
4133 index1 = index_ins1->dreg;
4134 index2 = index_ins2->dreg;
4136 /* range checking */
4137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4138 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound1, must be < length1
 * (unsigned compare also catches negative realidx) */
4140 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4141 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4142 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4143 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4144 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4145 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4146 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry */
4148 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4149 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4150 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4151 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4152 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4153 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4154 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Address: (realidx1 * len2 + realidx2) * size + arr + vector offset */
4156 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4157 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4158 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4159 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4160 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4162 ins->type = STACK_MP;
4164 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for an array Address/Get/Set
 * accessor CMETHOD of arbitrary rank.  Fast paths: rank 1 uses
 * mini_emit_ldelema_1_ins (with a bounds check); rank 2 uses
 * mini_emit_ldelema_2_ins when intrinsics are enabled and the target has
 * native mul.  Otherwise a call to the array-address marshalling wrapper
 * is emitted.  For setters the value argument is excluded from the rank.
 */
4171 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4175 MonoMethod *addr_method;
4178 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4181 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4183 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4184 /* emit_ldelema_2 depends on OP_LMUL */
4185 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4186 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the rank/element-size specific address wrapper */
4190 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4191 addr_method = mono_marshal_get_array_address (rank, element_size);
4192 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: always honor breakpoint instructions
 * (see mono_set_break_policy ()). */
4197 static MonoBreakPolicy
4198 always_insert_breakpoint (MonoMethod *method)
4200 return MONO_BREAK_POLICY_ALWAYS;
/* Current break-policy callback; defaults to always inserting breakpoints */
4203 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4206 * mono_set_break_policy:
4207 * policy_callback: the new callback function
4209 * Allow embedders to decide whether to actually obey breakpoint instructions
4210 * (both break IL instructions and Debugger.Break () method calls), for example
4211 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4212 * untrusted or semi-trusted code.
4214 * @policy_callback will be called every time a break point instruction needs to
4215 * be inserted with the method argument being the method that calls Debugger.Break()
4216 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4217 * if it wants the breakpoint to not be effective in the given method.
4218 * #MONO_BREAK_POLICY_ALWAYS is the default.
4221 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Passing NULL restores the default always-insert policy */
4223 if (policy_callback)
4224 break_policy_func = policy_callback;
4226 break_policy_func = always_insert_breakpoint;
/* Map the current break policy for METHOD to a boolean: should a breakpoint
 * actually be emitted?  (The misspelled name is historical.) */
4230 should_insert_brekpoint (MonoMethod *method) {
4231 switch (break_policy_func (method)) {
4232 case MONO_BREAK_POLICY_ALWAYS:
4234 case MONO_BREAK_POLICY_NEVER:
4236 case MONO_BREAK_POLICY_ON_DBG:
/* Only insert when running under the Mono debugger */
4237 return mono_debug_using_mono_debugger ();
/* An out-of-range policy value falls through to a warning */
4239 g_warning ("Incorrect value returned from break policy callback");
4244 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   args [0] = array, args [1] = index, args [2] = pointer to the value.
 * With IS_SET, the value at *args [2] is stored into the array element;
 * otherwise the element is loaded and stored into *args [2].  (The if/else
 * between the two load/store pairs is elided in this view.)
 */
4246 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4248 MonoInst *addr, *store, *load;
4249 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4251 /* the bounds check is already done by the callers */
4252 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: value -> array element */
4254 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4255 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* get: array element -> value */
4257 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4258 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction
 * sequence (currently only SIMD intrinsics, when MONO_OPT_SIMD is on).
 * INS stays NULL when no intrinsic applies.
 */
4264 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4266 MonoInst *ins = NULL;
4267 #ifdef MONO_ARCH_SIMD_INTRINSICS
4268 if (cfg->opt & MONO_OPT_SIMD) {
4269 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) to the
 * current basic block.
 */
4279 emit_memory_barrier (MonoCompile *cfg, int kind)
4281 MonoInst *ins = NULL;
4282 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4283 MONO_ADD_INS (cfg->cbb, ins);
4284 ins->backend.memory_barrier_kind = kind;
4290 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4292 MonoInst *ins = NULL;
4294 static MonoClass *runtime_helpers_class = NULL;
4295 if (! runtime_helpers_class)
4296 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4297 "System.Runtime.CompilerServices", "RuntimeHelpers");
4299 if (cmethod->klass == mono_defaults.string_class) {
4300 if (strcmp (cmethod->name, "get_Chars") == 0) {
4301 int dreg = alloc_ireg (cfg);
4302 int index_reg = alloc_preg (cfg);
4303 int mult_reg = alloc_preg (cfg);
4304 int add_reg = alloc_preg (cfg);
4306 #if SIZEOF_REGISTER == 8
4307 /* The array reg is 64 bits but the index reg is only 32 */
4308 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4310 index_reg = args [1]->dreg;
4312 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4314 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4315 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4316 add_reg = ins->dreg;
4317 /* Avoid a warning */
4319 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4322 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4323 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4324 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4325 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4327 type_from_op (ins, NULL, NULL);
4329 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4330 int dreg = alloc_ireg (cfg);
4331 /* Decompose later to allow more optimizations */
4332 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4333 ins->type = STACK_I4;
4334 ins->flags |= MONO_INST_FAULT;
4335 cfg->cbb->has_array_access = TRUE;
4336 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4339 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4340 int mult_reg = alloc_preg (cfg);
4341 int add_reg = alloc_preg (cfg);
4343 /* The corlib functions check for oob already. */
4344 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4345 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4346 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4347 return cfg->cbb->last_ins;
4350 } else if (cmethod->klass == mono_defaults.object_class) {
4352 if (strcmp (cmethod->name, "GetType") == 0) {
4353 int dreg = alloc_ireg_ref (cfg);
4354 int vt_reg = alloc_preg (cfg);
4355 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4357 type_from_op (ins, NULL, NULL);
4360 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4361 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4362 int dreg = alloc_ireg (cfg);
4363 int t1 = alloc_ireg (cfg);
4365 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4366 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4367 ins->type = STACK_I4;
4371 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4372 MONO_INST_NEW (cfg, ins, OP_NOP);
4373 MONO_ADD_INS (cfg->cbb, ins);
4377 } else if (cmethod->klass == mono_defaults.array_class) {
4378 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4379 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4381 #ifndef MONO_BIG_ARRAYS
4383 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4386 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4387 int dreg = alloc_ireg (cfg);
4388 int bounds_reg = alloc_ireg_mp (cfg);
4389 MonoBasicBlock *end_bb, *szarray_bb;
4390 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4392 NEW_BBLOCK (cfg, end_bb);
4393 NEW_BBLOCK (cfg, szarray_bb);
4395 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4396 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4399 /* Non-szarray case */
4401 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4402 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4404 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4405 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4406 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4407 MONO_START_BB (cfg, szarray_bb);
4410 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4411 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4413 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4414 MONO_START_BB (cfg, end_bb);
4416 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4417 ins->type = STACK_I4;
4423 if (cmethod->name [0] != 'g')
4426 if (strcmp (cmethod->name, "get_Rank") == 0) {
4427 int dreg = alloc_ireg (cfg);
4428 int vtable_reg = alloc_preg (cfg);
4429 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4430 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4431 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4432 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4433 type_from_op (ins, NULL, NULL);
4436 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4437 int dreg = alloc_ireg (cfg);
4439 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4440 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4441 type_from_op (ins, NULL, NULL);
4446 } else if (cmethod->klass == runtime_helpers_class) {
4448 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4449 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4453 } else if (cmethod->klass == mono_defaults.thread_class) {
4454 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4455 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4456 MONO_ADD_INS (cfg->cbb, ins);
4458 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4459 return emit_memory_barrier (cfg, FullBarrier);
4461 } else if (cmethod->klass == mono_defaults.monitor_class) {
4463 /* FIXME this should be integrated to the check below once we support the trampoline version */
4464 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4465 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4466 MonoMethod *fast_method = NULL;
4468 /* Avoid infinite recursion */
4469 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4472 fast_method = mono_monitor_get_fast_path (cmethod);
4476 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4480 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4481 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4484 if (COMPILE_LLVM (cfg)) {
4486 * Pass the argument normally, the LLVM backend will handle the
4487 * calling convention problems.
4489 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4491 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4492 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4493 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4494 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4497 return (MonoInst*)call;
4498 } else if (strcmp (cmethod->name, "Exit") == 0) {
4501 if (COMPILE_LLVM (cfg)) {
4502 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4504 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4505 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4506 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4507 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4510 return (MonoInst*)call;
4512 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4514 MonoMethod *fast_method = NULL;
4516 /* Avoid infinite recursion */
4517 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4518 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4519 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4522 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
4523 strcmp (cmethod->name, "Exit") == 0)
4524 fast_method = mono_monitor_get_fast_path (cmethod);
4528 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4531 } else if (cmethod->klass->image == mono_defaults.corlib &&
4532 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4533 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4536 #if SIZEOF_REGISTER == 8
4537 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4538 /* 64 bit reads are already atomic */
4539 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4540 ins->dreg = mono_alloc_preg (cfg);
4541 ins->inst_basereg = args [0]->dreg;
4542 ins->inst_offset = 0;
4543 MONO_ADD_INS (cfg->cbb, ins);
4547 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4548 if (strcmp (cmethod->name, "Increment") == 0) {
4549 MonoInst *ins_iconst;
4552 if (fsig->params [0]->type == MONO_TYPE_I4)
4553 opcode = OP_ATOMIC_ADD_NEW_I4;
4554 #if SIZEOF_REGISTER == 8
4555 else if (fsig->params [0]->type == MONO_TYPE_I8)
4556 opcode = OP_ATOMIC_ADD_NEW_I8;
4559 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4560 ins_iconst->inst_c0 = 1;
4561 ins_iconst->dreg = mono_alloc_ireg (cfg);
4562 MONO_ADD_INS (cfg->cbb, ins_iconst);
4564 MONO_INST_NEW (cfg, ins, opcode);
4565 ins->dreg = mono_alloc_ireg (cfg);
4566 ins->inst_basereg = args [0]->dreg;
4567 ins->inst_offset = 0;
4568 ins->sreg2 = ins_iconst->dreg;
4569 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4570 MONO_ADD_INS (cfg->cbb, ins);
4572 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4573 MonoInst *ins_iconst;
4576 if (fsig->params [0]->type == MONO_TYPE_I4)
4577 opcode = OP_ATOMIC_ADD_NEW_I4;
4578 #if SIZEOF_REGISTER == 8
4579 else if (fsig->params [0]->type == MONO_TYPE_I8)
4580 opcode = OP_ATOMIC_ADD_NEW_I8;
4583 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4584 ins_iconst->inst_c0 = -1;
4585 ins_iconst->dreg = mono_alloc_ireg (cfg);
4586 MONO_ADD_INS (cfg->cbb, ins_iconst);
4588 MONO_INST_NEW (cfg, ins, opcode);
4589 ins->dreg = mono_alloc_ireg (cfg);
4590 ins->inst_basereg = args [0]->dreg;
4591 ins->inst_offset = 0;
4592 ins->sreg2 = ins_iconst->dreg;
4593 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4594 MONO_ADD_INS (cfg->cbb, ins);
4596 } else if (strcmp (cmethod->name, "Add") == 0) {
4599 if (fsig->params [0]->type == MONO_TYPE_I4)
4600 opcode = OP_ATOMIC_ADD_NEW_I4;
4601 #if SIZEOF_REGISTER == 8
4602 else if (fsig->params [0]->type == MONO_TYPE_I8)
4603 opcode = OP_ATOMIC_ADD_NEW_I8;
4607 MONO_INST_NEW (cfg, ins, opcode);
4608 ins->dreg = mono_alloc_ireg (cfg);
4609 ins->inst_basereg = args [0]->dreg;
4610 ins->inst_offset = 0;
4611 ins->sreg2 = args [1]->dreg;
4612 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4613 MONO_ADD_INS (cfg->cbb, ins);
4616 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4618 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4619 if (strcmp (cmethod->name, "Exchange") == 0) {
4621 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4623 if (fsig->params [0]->type == MONO_TYPE_I4)
4624 opcode = OP_ATOMIC_EXCHANGE_I4;
4625 #if SIZEOF_REGISTER == 8
4626 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4627 (fsig->params [0]->type == MONO_TYPE_I))
4628 opcode = OP_ATOMIC_EXCHANGE_I8;
4630 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4631 opcode = OP_ATOMIC_EXCHANGE_I4;
4636 MONO_INST_NEW (cfg, ins, opcode);
4637 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4638 ins->inst_basereg = args [0]->dreg;
4639 ins->inst_offset = 0;
4640 ins->sreg2 = args [1]->dreg;
4641 MONO_ADD_INS (cfg->cbb, ins);
4643 switch (fsig->params [0]->type) {
4645 ins->type = STACK_I4;
4649 ins->type = STACK_I8;
4651 case MONO_TYPE_OBJECT:
4652 ins->type = STACK_OBJ;
4655 g_assert_not_reached ();
4658 if (cfg->gen_write_barriers && is_ref)
4659 emit_write_barrier (cfg, args [0], args [1], -1);
4661 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4663 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4664 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4666 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
4667 if (fsig->params [1]->type == MONO_TYPE_I4)
4669 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4670 size = sizeof (gpointer);
4671 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4674 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4675 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4676 ins->sreg1 = args [0]->dreg;
4677 ins->sreg2 = args [1]->dreg;
4678 ins->sreg3 = args [2]->dreg;
4679 ins->type = STACK_I4;
4680 MONO_ADD_INS (cfg->cbb, ins);
4681 } else if (size == 8) {
4682 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4683 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4684 ins->sreg1 = args [0]->dreg;
4685 ins->sreg2 = args [1]->dreg;
4686 ins->sreg3 = args [2]->dreg;
4687 ins->type = STACK_I8;
4688 MONO_ADD_INS (cfg->cbb, ins);
4690 /* g_assert_not_reached (); */
4692 if (cfg->gen_write_barriers && is_ref)
4693 emit_write_barrier (cfg, args [0], args [1], -1);
4695 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4699 } else if (cmethod->klass->image == mono_defaults.corlib) {
4700 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4701 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4702 if (should_insert_brekpoint (cfg->method)) {
4703 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
4705 MONO_INST_NEW (cfg, ins, OP_NOP);
4706 MONO_ADD_INS (cfg->cbb, ins);
4710 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4711 && strcmp (cmethod->klass->name, "Environment") == 0) {
4713 EMIT_NEW_ICONST (cfg, ins, 1);
4715 EMIT_NEW_ICONST (cfg, ins, 0);
4719 } else if (cmethod->klass == mono_defaults.math_class) {
4721 * There is general branches code for Min/Max, but it does not work for
4723 * http://everything2.com/?node_id=1051618
4727 #ifdef MONO_ARCH_SIMD_INTRINSICS
4728 if (cfg->opt & MONO_OPT_SIMD) {
4729 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4735 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4739 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect calls to selected runtime-internal methods to cheaper
 * JIT-emitted equivalents.  The only case visible here is
 * String.InternalAllocateStr, which is rewritten into a direct call to the
 * managed GC allocator when allocation profiling is off and shared (generic
 * AOT-style) code is not being produced.  Returns the emitted call
 * instruction when a redirection applied.
 */
4742 inline static MonoInst*
4743 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4744 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4746 if (method->klass == mono_defaults.string_class) {
4747 /* managed string allocation support */
4748 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4749 MonoInst *iargs [2];
4750 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4751 MonoMethod *managed_alloc = NULL;
4753 g_assert (vtable); /* Should not fail since it is System.String */
4754 #ifndef MONO_CROSS_COMPILE
4755 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, original first argument —
 * presumably the requested string length; confirm against
 * InternalAllocateStr's signature). */
4759 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4760 iargs [1] = args [0];
4761 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Copy the call arguments on the evaluation stack SP into freshly created
 * local variables, storing them into cfg->args [] so the inlined body can
 * reference them like normal method arguments.  Slot 0 holds the implicit
 * 'this' when sig->hasthis is set; its type is derived from the stack entry
 * since it is not part of sig->params.
 */
4768 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4770 MonoInst *store, *temp;
4773 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4774 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4777 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4778 * would be different than the MonoInst's used to represent arguments, and
4779 * the ldelema implementation can't deal with that.
4780 * Solution: When ldelema is used on an inline argument, create a var for
4781 * it, emit ldelema on that var, and emit the saving code below in
4782 * inline_method () if needed.
4784 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4785 cfg->args [i] = temp;
4786 /* This uses cfg->args [i] which is set by the preceding line */
4787 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
/* Preserve the original IL location for debugging/line info. */
4788 store->cil_code = sp [0]->cil_code;
4793 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4794 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4796 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of CALLED_METHOD when its full name
 * starts with the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable.  The variable is read once and cached in a static;
 * an unset variable becomes the empty string, which disables the filter.
 */
4798 check_inline_called_method_name_limit (MonoMethod *called_method)
4801 static char *limit = NULL;
4803 if (limit == NULL) {
4804 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4806 if (limit_string != NULL)
4807 limit = limit_string;
4809 limit = (char *) "";
4812 if (limit [0] != '\0') {
4813 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) characters matter. */
4815 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4816 g_free (called_method_name);
4818 //return (strncmp_result <= 0);
4819 return (strncmp_result == 0);
4826 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining into CALLER_METHOD when its full name
 * starts with the prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  Mirrors check_inline_called_method_name_limit ()
 * but filters on the caller instead of the callee.
 */
4828 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4831 static char *limit = NULL;
4833 if (limit == NULL) {
4834 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4835 if (limit_string != NULL) {
4836 limit = limit_string;
4838 limit = (char *) "";
4842 if (limit [0] != '\0') {
4843 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison: only the first strlen (limit) characters matter. */
4845 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4846 g_free (caller_method_name);
4848 //return (strncmp_result <= 0);
4849 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the parts of
 * CFG state that mono_method_to_ir () will clobber (locals, args, cbb, cil
 * offsets, generic context, ...), compiles the callee's IL into a fresh
 * start/end bblock pair, then either links the new blocks into the graph
 * (success) or restores cfg->cbb to abandon them (abort).  RVAR, when
 * non-NULL, is the local that receives the callee's return value.
 * INLINE_ALWAYS forces inlining regardless of the cost heuristic.
 */
4857 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4858 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4860 MonoInst *ins, *rvar = NULL;
4861 MonoMethodHeader *cheader;
4862 MonoBasicBlock *ebblock, *sbblock;
4864 MonoMethod *prev_inlined_method;
4865 MonoInst **prev_locals, **prev_args;
4866 MonoType **prev_arg_types;
4867 guint prev_real_offset;
4868 GHashTable *prev_cbb_hash;
4869 MonoBasicBlock **prev_cil_offset_to_bb;
4870 MonoBasicBlock *prev_cbb;
4871 unsigned char* prev_cil_start;
4872 guint32 prev_cil_offset_to_bb_len;
4873 MonoMethod *prev_current_method;
4874 MonoGenericContext *prev_generic_context;
4875 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
4877 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var-driven filters used while debugging inliner problems. */
4879 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4880 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4883 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4884 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4888 if (cfg->verbose_level > 2)
4889 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* First time we consider this callee: count it as inlineable. */
4891 if (!cmethod->inline_info) {
4892 cfg->stat_inlineable_methods++;
4893 cmethod->inline_info = 1;
4896 /* allocate local variables */
4897 cheader = mono_method_get_header (cmethod);
4899 if (cheader == NULL || mono_loader_get_last_error ()) {
4900 MonoLoaderError *error = mono_loader_get_last_error ();
4903 mono_metadata_free_mh (cheader);
/* When inlining is mandatory, propagate the loader failure to the cfg. */
4904 if (inline_always && error)
4905 mono_cfg_set_exception (cfg, error->exception_type);
4907 mono_loader_clear_error ();
4911 /* Must verify before creating locals as it can cause the JIT to assert. */
4912 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4913 mono_metadata_free_mh (cheader);
4917 /* allocate space to store the return value */
4918 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4919 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
4922 prev_locals = cfg->locals;
4923 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4924 for (i = 0; i < cheader->num_locals; ++i)
4925 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4927 /* allocate start and end blocks */
4928 /* This is needed so if the inline is aborted, we can clean up */
4929 NEW_BBLOCK (cfg, sbblock);
4930 sbblock->real_offset = real_offset;
4932 NEW_BBLOCK (cfg, ebblock);
4933 ebblock->block_num = cfg->num_bblocks++;
4934 ebblock->real_offset = real_offset;
/* Save all cfg state that mono_method_to_ir () overwrites. */
4936 prev_args = cfg->args;
4937 prev_arg_types = cfg->arg_types;
4938 prev_inlined_method = cfg->inlined_method;
4939 cfg->inlined_method = cmethod;
4940 cfg->ret_var_set = FALSE;
4941 cfg->inline_depth ++;
4942 prev_real_offset = cfg->real_offset;
4943 prev_cbb_hash = cfg->cbb_hash;
4944 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4945 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4946 prev_cil_start = cfg->cil_start;
4947 prev_cbb = cfg->cbb;
4948 prev_current_method = cfg->current_method;
4949 prev_generic_context = cfg->generic_context;
4950 prev_ret_var_set = cfg->ret_var_set;
/* A callvirt on an instance method needs a null check on 'this'. */
4952 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Compile the callee's IL into sbblock..ebblock; negative cost = failure. */
4955 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
4957 ret_var_set = cfg->ret_var_set;
/* Restore the caller's cfg state saved above. */
4959 cfg->inlined_method = prev_inlined_method;
4960 cfg->real_offset = prev_real_offset;
4961 cfg->cbb_hash = prev_cbb_hash;
4962 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4963 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4964 cfg->cil_start = prev_cil_start;
4965 cfg->locals = prev_locals;
4966 cfg->args = prev_args;
4967 cfg->arg_types = prev_arg_types;
4968 cfg->current_method = prev_current_method;
4969 cfg->generic_context = prev_generic_context;
4970 cfg->ret_var_set = prev_ret_var_set;
4971 cfg->inline_depth --;
/* Accept the inline when the cost is below the threshold or it is forced. */
4973 if ((costs >= 0 && costs < 60) || inline_always) {
4974 if (cfg->verbose_level > 2)
4975 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4977 cfg->stat_inlined_methods++;
4979 /* always add some code to avoid block split failures */
4980 MONO_INST_NEW (cfg, ins, OP_NOP);
4981 MONO_ADD_INS (prev_cbb, ins);
4983 prev_cbb->next_bb = sbblock;
4984 link_bblock (cfg, prev_cbb, sbblock);
4987 * Get rid of the begin and end bblocks if possible to aid local
4990 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4992 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4993 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4995 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4996 MonoBasicBlock *prev = ebblock->in_bb [0];
4997 mono_merge_basic_blocks (cfg, prev, ebblock);
4999 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5000 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5001 cfg->cbb = prev_cbb;
5009 * If the inlined method contains only a throw, then the ret var is not
5010 * set, so set it to a dummy value.
5013 static double r8_0 = 0.0;
5015 switch (rvar->type) {
5017 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5020 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5025 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5028 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5029 ins->type = STACK_R8;
5030 ins->inst_p0 = (void*)&r8_0;
5031 ins->dreg = rvar->dreg;
5032 MONO_ADD_INS (cfg->cbb, ins);
5035 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
5038 g_assert_not_reached ();
/* Load the return value back onto the evaluation stack. */
5042 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5045 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear any pending exception and discard new bblocks. */
5048 if (cfg->verbose_level > 2)
5049 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5050 cfg->exception_type = MONO_EXCEPTION_NONE;
5051 mono_loader_clear_error ();
5053 /* This gets rid of the newly added bblocks */
5054 cfg->cbb = prev_cbb;
5056 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5061 * Some of these comments may well be out-of-date.
5062 * Design decisions: we do a single pass over the IL code (and we do bblock
5063 * splitting/merging in the few cases when it's required: a back jump to an IL
5064 * address that was not already seen as bblock starting point).
5065 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5066 * Complex operations are decomposed in simpler ones right away. We need to let the
5067 * arch-specific code peek and poke inside this process somehow (except when the
5068 * optimizations can take advantage of the full semantic info of coarse opcodes).
5069 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5070 * MonoInst->opcode initially is the IL opcode or some simplification of that
5071 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5072 * opcode with value bigger than OP_LAST.
5073 * At this point the IR can be handed over to an interpreter, a dumb code generator
5074 * or to the optimizing code generator that will translate it to SSA form.
5076 * Profiling directed optimizations.
5077 * We may compile by default with few or no optimizations and instrument the code
5078 * or the user may indicate what methods to optimize the most either in a config file
5079 * or through repeated runs where the compiler applies offline the optimizations to
5080 * each method and then decides if it was worth it.
5083 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5084 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5085 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5086 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5087 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5088 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5089 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5090 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5092 /* offset from br.s -> br like opcodes */
5093 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to basic block BB, i.e.
 * no other bblock has been registered as starting at that offset.
 */
5096 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5098 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5100 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and register a basic block (via
 * GET_BBLOCK) at every branch target and at every instruction following a
 * branch/switch.  Also marks bblocks that end in CEE_THROW as out-of-line so
 * later passes can move them off the hot path.
 */
5104 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5106 unsigned char *ip = start;
5107 unsigned char *target;
5110 MonoBasicBlock *bblock;
5111 const MonoOpcode *opcode;
5114 cli_addr = ip - start;
5115 i = mono_opcode_value ((const guint8 **)&ip, end);
5118 opcode = &mono_opcodes [i];
/* Advance IP by the operand size; only branch operands create bblocks. */
5119 switch (opcode->argument) {
5120 case MonoInlineNone:
5123 case MonoInlineString:
5124 case MonoInlineType:
5125 case MonoInlineField:
5126 case MonoInlineMethod:
5129 case MonoShortInlineR:
5136 case MonoShortInlineVar:
5137 case MonoShortInlineI:
5140 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the next instruction (+2). */
5141 target = start + cli_addr + 2 + (signed char)ip [1];
5142 GET_BBLOCK (cfg, bblock, target);
5145 GET_BBLOCK (cfg, bblock, ip);
5147 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the next instruction (+5). */
5148 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5149 GET_BBLOCK (cfg, bblock, target);
5152 GET_BBLOCK (cfg, bblock, ip);
5154 case MonoInlineSwitch: {
5155 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table. */
5158 cli_addr += 5 + 4 * n;
5159 target = start + cli_addr;
5160 GET_BBLOCK (cfg, bblock, target);
5162 for (j = 0; j < n; ++j) {
5163 target = start + cli_addr + (gint32)read32 (ip);
5164 GET_BBLOCK (cfg, bblock, target);
5174 g_assert_not_reached ();
5177 if (i == CEE_THROW) {
5178 unsigned char *bb_start = ip - 1;
5180 /* Find the start of the bblock containing the throw */
5182 while ((bb_start >= start) && !bblock) {
5183 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: mark for out-of-line placement. */
5187 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve the method referenced by TOKEN in the context of M.  For
 * wrapper methods the token indexes the wrapper's own data table; otherwise
 * it is resolved through the image's metadata with the given generic
 * CONTEXT.  Open constructed types are allowed (contrast mini_get_method ()).
 */
5196 static inline MonoMethod *
5197 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5201 if (m->wrapper_type != MONO_WRAPPER_NONE)
5202 return mono_method_get_wrapper_data (m, token);
5204 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared
 * (generic-sharing) code a method on an open constructed type is rejected,
 * since such a method cannot be compiled directly.
 */
5209 static inline MonoMethod *
5210 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5212 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5214 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve the class referenced by TOKEN relative to METHOD: wrapper
 * methods look it up in their wrapper data, normal methods through image
 * metadata with the generic CONTEXT.  The class is initialized before use.
 */
5220 static inline MonoClass*
5221 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5225 if (method->wrapper_type != MONO_WRAPPER_NONE)
5226 klass = mono_method_get_wrapper_data (method, token);
5228 klass = mono_class_get_full (method->klass->image, token, context);
5230 mono_class_init (klass);
5235 * Returns TRUE if the JIT should abort inlining because "callee"
5236 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a CALLER -> CALLEE call.  On an ECMA
 * link demand, code throwing a SecurityException is emitted before the
 * call; other failures set a SECURITY_LINKDEMAND exception on the cfg
 * (unless an earlier exception is already pending).
 */
5239 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check when inlining (cfg->method != caller) into declsec code. */
5243 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5247 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5248 if (result == MONO_JIT_SECURITY_OK)
5251 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5252 /* Generate code to throw a SecurityException before the actual call/link */
5253 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5256 NEW_ICONST (cfg, args [0], 4);
5257 NEW_METHODCONST (cfg, args [1], caller);
5258 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5259 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5260 /* don't hide previous results */
5261 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5262 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a static) the managed
 * SecurityManager.ThrowException(exception) helper method.
 */
5270 throw_exception (void)
5272 static MonoMethod *method = NULL;
5275 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5276 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException passing the preallocated
 * exception object EX, so the exception is raised at runtime when this
 * code path executes.
 */
5283 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5285 MonoMethod *thrower = throw_exception ();
5288 EMIT_NEW_PCONST (cfg, args [0], ex);
5289 mono_emit_method_call (cfg, thrower, args, NULL);
5293 * Return the original method if a wrapper is specified. We can only access
5294 * the custom attributes from the original method.
5297 get_original_method (MonoMethod *method)
5299 if (method->wrapper_type == MONO_WRAPPER_NONE)
5302 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5303 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5306 /* in other cases we need to find the original method */
5307 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check for field access: if the (unwrapped) CALLER is
 * not allowed to touch FIELD, emit code that throws the returned exception
 * at runtime instead of failing the compile.
 */
5311 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5312 MonoBasicBlock *bblock, unsigned char *ip)
5314 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5315 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5317 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check for method calls: if the (unwrapped) CALLER may
 * not call CALLEE, emit code that throws the returned exception at runtime.
 */
5321 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5322 MonoBasicBlock *bblock, unsigned char *ip)
5324 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5325 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5327 emit_throw_exception (cfg, ex);
5331 * Check that the IL instructions at ip are the array initialization
5332 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the dup/ldtoken/call RuntimeHelpers.InitializeArray IL
 * sequence that follows a newarr, and when it matches return a pointer to
 * the static field data (or, for AOT, the RVA wrapped in a pointer) so the
 * call can be replaced by a direct memory copy.  OUT_SIZE and
 * OUT_FIELD_TOKEN receive the element size and the field token.
 */
5335 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5338 * newarr[System.Int32]
5340 * ldtoken field valuetype ...
5341 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand's table byte (Field token). */
5343 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5344 guint32 token = read32 (ip + 7);
5345 guint32 field_token = read32 (ip + 2);
5346 guint32 field_index = field_token & 0xffffff;
5348 const char *data_ptr;
5350 MonoMethod *cmethod;
5351 MonoClass *dummy_class;
5352 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5358 *out_field_token = field_token;
5360 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The call must really be RuntimeHelpers.InitializeArray from corlib. */
5363 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5365 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5366 case MONO_TYPE_BOOLEAN:
5370 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5371 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5372 case MONO_TYPE_CHAR:
5382 return NULL; /* stupid ARM FP swapped format */
/* Sanity: the computed blob size must fit inside the field's type. */
5392 if (size > mono_type_size (field->type, &dummy_align))
5395 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5396 if (!method->klass->image->dynamic) {
5397 field_index = read32 (ip + 2) & 0xffffff;
5398 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5399 data_ptr = mono_image_rva_map (method->klass->image, rva);
5400 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5401 /* for aot code we do the lookup on load */
5402 if (aot && data_ptr)
5403 return GUINT_TO_POINTER (rva);
5405 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images: get the data directly from the field object. */
5407 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG with a message that includes
 * the method's full name and a disassembly of the offending instruction at
 * IP (or a note that the body is empty).
 */
5415 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5417 char *method_fname = mono_method_full_name (method, TRUE);
5419 MonoMethodHeader *header = mono_method_get_header (method);
5421 if (header->code_size == 0)
5422 method_code = g_strdup ("method body is empty.");
5424 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5425 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5426 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5427 g_free (method_fname);
5428 g_free (method_code);
/* Defer freeing the header until the cfg itself is destroyed. */
5429 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG, GC-rooting
 * cfg->exception_ptr so the object survives until the failure is reported.
 */
5433 set_exception_object (MonoCompile *cfg, MonoException *exception)
5435 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5436 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5437 cfg->exception_ptr = exception;
/* Return whether KLASS is a reference type, taking the cfg's generic
 * sharing configuration into account via mini_type_is_reference (). */
5441 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5443 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack value SP [0] into local N.  When the value to
 * store is a freshly emitted constant and the move would be reg-to-reg,
 * simply retarget the constant's destination register to the local's,
 * saving an instruction.
 */
5447 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5450 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5451 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5452 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5453 /* Optimize reg-reg moves away */
5455 * Can't optimize other opcodes, since sp[0] might point to
5456 * the last ins of a decomposed opcode.
5458 sp [0]->dreg = (cfg)->locals [n]->dreg;
5460 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5465 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for 'ldloca N; initobj T': instead of taking the local's
 * address, directly zero the local (NULL store for reference types, VZERO
 * for value types), which keeps the local addressable-free.  Returns the
 * IP past the consumed sequence on success (NULL otherwise, in unseen
 * code).  SIZE selects the short vs. long ldloca encoding.
 */
5468 static inline unsigned char *
5469 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5478 local = read16 (ip + 2);
5482 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5483 gboolean skip = FALSE;
5485 /* From the INITOBJ case */
5486 token = read32 (ip + 2);
5487 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5488 CHECK_TYPELOAD (klass);
5489 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5490 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5491 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5492 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/* Return whether CLASS derives from (or is) System.Exception, walking up
 * the parent chain. */
5505 is_exception_class (MonoClass *class)
5508 if (class == mono_defaults.exception_class)
5510 class = class->parent;
5516 * is_jit_optimizer_disabled:
5518 * Determine whether M's assembly has a DebuggableAttribute with the
5519 * IsJITOptimizerDisabled flag set.
/*
 * The result is computed once per assembly and cached in
 * ass->jit_optimizer_disabled; a memory barrier orders the value before the
 * 'inited' flag so concurrent readers never see the flag without the value.
 */
5522 is_jit_optimizer_disabled (MonoMethod *m)
5524 MonoAssembly *ass = m->klass->image->assembly;
5525 MonoCustomAttrInfo* attrs;
5526 static MonoClass *klass;
5528 gboolean val = FALSE;
5531 if (ass->jit_optimizer_disabled_inited)
5532 return ass->jit_optimizer_disabled;
5535 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute type at all: optimizer stays enabled. */
5538 ass->jit_optimizer_disabled = FALSE;
5539 mono_memory_barrier ();
5540 ass->jit_optimizer_disabled_inited = TRUE;
5544 attrs = mono_custom_attrs_from_assembly (ass);
5546 for (i = 0; i < attrs->num_attrs; ++i) {
5547 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5550 MonoMethodSignature *sig;
5552 if (!attr->ctor || attr->ctor->klass != klass)
5554 /* Decode the attribute. See reflection.c */
5555 len = attr->data_size;
5556 p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog. */
5557 g_assert (read16 (p) == 0x0001);
5560 // FIXME: Support named parameters
5561 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) constructor is handled. */
5562 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5564 /* Two boolean arguments */
5568 mono_custom_attrs_free (attrs);
5571 ass->jit_optimizer_disabled = val;
5572 mono_memory_barrier ();
5573 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a call from METHOD to CMETHOD with signature FSIG can be
 * compiled as a real tail call.  Starts from an arch/signature
 * compatibility check, then vetoes any case where the callee could observe
 * the caller's (about to be unwound) stack frame: byref/pointer/fnptr
 * parameters, valuetype 'this', pinvoke, LMF-saving callers and most
 * wrappers.
 */
5579 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5581 gboolean supported_tail_call;
5584 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5585 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5587 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5590 for (i = 0; i < fsig->param_count; ++i) {
5591 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5592 /* These can point to the current method's stack */
5593 supported_tail_call = FALSE;
5595 if (fsig->hasthis && cmethod->klass->valuetype)
5596 /* this might point to the current method's stack */
5597 supported_tail_call = FALSE;
5598 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5599 supported_tail_call = FALSE;
5600 if (cfg->method->save_lmf)
5601 supported_tail_call = FALSE;
5602 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5603 supported_tail_call = FALSE;
5605 /* Debugging support */
/* COUNT env var lets a developer bisect which tail call causes a bug. */
5607 if (supported_tail_call) {
5608 static int count = 0;
5610 if (getenv ("COUNT")) {
5611 if (count == atoi (getenv ("COUNT")))
5612 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5613 if (count > atoi (getenv ("COUNT")))
5614 supported_tail_call = FALSE;
5619 return supported_tail_call;
5622 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5623 * it to the thread local value based on the tls_offset field. Every other kind of access to
5624 * the field causes an assert.
/* Return whether FIELD is exactly corlib's ThreadLocal`1.tlsdata field. */
5627 is_magic_tls_access (MonoClassField *field)
5629 if (strcmp (field->name, "tlsdata"))
5631 if (strcmp (field->parent->name, "ThreadLocal`1"))
5633 return field->parent->image == mono_defaults.corlib;
5636 /* emits the code needed to access a managed tls var (like ThreadStatic)
5637 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5638 * pointer for the current thread.
5639 * Returns the MonoInst* representing the address of the tls var.
5642 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5645 int static_data_reg, array_reg, dreg;
5646 int offset2_reg, idx_reg;
5647 // inlined access to the tls data
5648 // idx = (offset >> 24) - 1;
5649 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
5650 static_data_reg = alloc_ireg (cfg);
5651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1: top byte of the offset selects the chunk. */
5652 idx_reg = alloc_ireg (cfg);
5653 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale idx by pointer size (shift by 3 on 64-bit, 2 on 32-bit). */
5655 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5656 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
5657 array_reg = alloc_ireg (cfg);
5658 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2 = offset & 0xffffff: low 24 bits are the offset in the chunk. */
5659 offset2_reg = alloc_ireg (cfg);
5660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
5661 dreg = alloc_ireg (cfg);
5662 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
5667 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
5668 * this address is cached per-method in cached_tls_addr.
/* Emits IR yielding the address of the thread-local slot backing TLS_FIELD.
 * The address is memoized in *CACHED_TLS_ADDR (stored through a temp variable)
 * so later accesses in the same method only reload the temp.
 * NOTE(review): several control-flow lines (braces, if/else headers, return)
 * are not visible in this excerpt — confirm against the full source. */
5671 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
5673 MonoInst *load, *addr, *temp, *store, *thread_ins;
5674 MonoClassField *offset_field;
/* Fast path: address already computed for this method — reload the cached temp. */
5676 if (*cached_tls_addr) {
5677 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Slow path: try the arch thread intrinsic and load the tls_offset field value
 * from the ThreadLocal`1 instance referenced by thread_local. */
5680 thread_ins = mono_get_thread_intrinsic (cfg);
5681 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
5683 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
5685 MONO_ADD_INS (cfg->cbb, thread_ins);
/* NOTE(review): the lines below look like the fallback branch taken when no
 * thread intrinsic is available (calling CurrentInternalThread_internal
 * instead); the branching lines themselves are missing here — verify. */
5687 MonoMethod *thread_method;
5688 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
5689 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the slot address and tag it with managed-pointer type information. */
5691 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
5692 addr->klass = mono_class_from_mono_type (tls_field->type);
5693 addr->type = STACK_MP;
/* Cache the address in a fresh local temp, then hand back a load of that temp. */
5694 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
5695 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
5697 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
5702 * mono_method_to_ir:
5704 * Translate the .net IL into linear IR.
5707 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5708 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5709 guint inline_offset, gboolean is_virtual_call)
5712 MonoInst *ins, **sp, **stack_start;
5713 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5714 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5715 MonoMethod *cmethod, *method_definition;
5716 MonoInst **arg_array;
5717 MonoMethodHeader *header;
5719 guint32 token, ins_flag;
5721 MonoClass *constrained_call = NULL;
5722 unsigned char *ip, *end, *target, *err_pos;
5723 static double r8_0 = 0.0;
5724 MonoMethodSignature *sig;
5725 MonoGenericContext *generic_context = NULL;
5726 MonoGenericContainer *generic_container = NULL;
5727 MonoType **param_types;
5728 int i, n, start_new_bblock, dreg;
5729 int num_calls = 0, inline_costs = 0;
5730 int breakpoint_id = 0;
5732 MonoBoolean security, pinvoke;
5733 MonoSecurityManager* secman = NULL;
5734 MonoDeclSecurityActions actions;
5735 GSList *class_inits = NULL;
5736 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5738 gboolean init_locals, seq_points, skip_dead_blocks;
5739 gboolean disable_inline;
5740 MonoInst *cached_tls_addr = NULL;
5742 disable_inline = is_jit_optimizer_disabled (method);
5744 /* serialization and xdomain stuff may need access to private fields and methods */
5745 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5746 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5747 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5748 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5749 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5750 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5752 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5754 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5755 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5756 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5757 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5758 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5760 image = method->klass->image;
5761 header = mono_method_get_header (method);
5763 MonoLoaderError *error;
5765 if ((error = mono_loader_get_last_error ())) {
5766 mono_cfg_set_exception (cfg, error->exception_type);
5768 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5769 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5771 goto exception_exit;
5773 generic_container = mono_method_get_generic_container (method);
5774 sig = mono_method_signature (method);
5775 num_args = sig->hasthis + sig->param_count;
5776 ip = (unsigned char*)header->code;
5777 cfg->cil_start = ip;
5778 end = ip + header->code_size;
5779 cfg->stat_cil_code_size += header->code_size;
5780 init_locals = header->init_locals;
5782 seq_points = cfg->gen_seq_points && cfg->method == method;
5785 * Methods without init_locals set could cause asserts in various passes
5790 method_definition = method;
5791 while (method_definition->is_inflated) {
5792 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5793 method_definition = imethod->declaring;
5796 /* SkipVerification is not allowed if core-clr is enabled */
5797 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5799 dont_verify_stloc = TRUE;
5802 if (mono_debug_using_mono_debugger ())
5803 cfg->keep_cil_nops = TRUE;
5805 if (sig->is_inflated)
5806 generic_context = mono_method_get_context (method);
5807 else if (generic_container)
5808 generic_context = &generic_container->context;
5809 cfg->generic_context = generic_context;
5811 if (!cfg->generic_sharing_context)
5812 g_assert (!sig->has_type_parameters);
5814 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5815 g_assert (method->is_inflated);
5816 g_assert (mono_method_get_context (method)->method_inst);
5818 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5819 g_assert (sig->generic_param_count);
5821 if (cfg->method == method) {
5822 cfg->real_offset = 0;
5824 cfg->real_offset = inline_offset;
5827 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5828 cfg->cil_offset_to_bb_len = header->code_size;
5830 cfg->current_method = method;
5832 if (cfg->verbose_level > 2)
5833 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5835 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5837 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5838 for (n = 0; n < sig->param_count; ++n)
5839 param_types [n + sig->hasthis] = sig->params [n];
5840 cfg->arg_types = param_types;
5842 dont_inline = g_list_prepend (dont_inline, method);
5843 if (cfg->method == method) {
5845 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5846 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5849 NEW_BBLOCK (cfg, start_bblock);
5850 cfg->bb_entry = start_bblock;
5851 start_bblock->cil_code = NULL;
5852 start_bblock->cil_length = 0;
5853 #if defined(__native_client_codegen__)
5854 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5855 ins->dreg = alloc_dreg (cfg, STACK_I4);
5856 MONO_ADD_INS (start_bblock, ins);
5860 NEW_BBLOCK (cfg, end_bblock);
5861 cfg->bb_exit = end_bblock;
5862 end_bblock->cil_code = NULL;
5863 end_bblock->cil_length = 0;
5864 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5865 g_assert (cfg->num_bblocks == 2);
5867 arg_array = cfg->args;
5869 if (header->num_clauses) {
5870 cfg->spvars = g_hash_table_new (NULL, NULL);
5871 cfg->exvars = g_hash_table_new (NULL, NULL);
5873 /* handle exception clauses */
5874 for (i = 0; i < header->num_clauses; ++i) {
5875 MonoBasicBlock *try_bb;
5876 MonoExceptionClause *clause = &header->clauses [i];
5877 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5878 try_bb->real_offset = clause->try_offset;
5879 try_bb->try_start = TRUE;
5880 try_bb->region = ((i + 1) << 8) | clause->flags;
5881 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5882 tblock->real_offset = clause->handler_offset;
5883 tblock->flags |= BB_EXCEPTION_HANDLER;
5885 link_bblock (cfg, try_bb, tblock);
5887 if (*(ip + clause->handler_offset) == CEE_POP)
5888 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5890 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5891 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5892 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5893 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5894 MONO_ADD_INS (tblock, ins);
5896 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
5897 /* finally clauses already have a seq point */
5898 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5899 MONO_ADD_INS (tblock, ins);
5902 /* todo: is a fault block unsafe to optimize? */
5903 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5904 tblock->flags |= BB_EXCEPTION_UNSAFE;
5908 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5910 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5912 /* catch and filter blocks get the exception object on the stack */
5913 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5914 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5915 MonoInst *dummy_use;
5917 /* mostly like handle_stack_args (), but just sets the input args */
5918 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5919 tblock->in_scount = 1;
5920 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5921 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5924 * Add a dummy use for the exvar so its liveness info will be
5928 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5930 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5931 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5932 tblock->flags |= BB_EXCEPTION_HANDLER;
5933 tblock->real_offset = clause->data.filter_offset;
5934 tblock->in_scount = 1;
5935 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5936 /* The filter block shares the exvar with the handler block */
5937 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5938 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5939 MONO_ADD_INS (tblock, ins);
5943 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5944 clause->data.catch_class &&
5945 cfg->generic_sharing_context &&
5946 mono_class_check_context_used (clause->data.catch_class)) {
5948 * In shared generic code with catch
5949 * clauses containing type variables
5950 * the exception handling code has to
5951 * be able to get to the rgctx.
5952 * Therefore we have to make sure that
5953 * the vtable/mrgctx argument (for
5954 * static or generic methods) or the
5955 * "this" argument (for non-static
5956 * methods) are live.
5958 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5959 mini_method_get_context (method)->method_inst ||
5960 method->klass->valuetype) {
5961 mono_get_vtable_var (cfg);
5963 MonoInst *dummy_use;
5965 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5970 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5971 cfg->cbb = start_bblock;
5972 cfg->args = arg_array;
5973 mono_save_args (cfg, sig, inline_args);
5976 /* FIRST CODE BLOCK */
5977 NEW_BBLOCK (cfg, bblock);
5978 bblock->cil_code = ip;
5982 ADD_BBLOCK (cfg, bblock);
5984 if (cfg->method == method) {
5985 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5986 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5987 MONO_INST_NEW (cfg, ins, OP_BREAK);
5988 MONO_ADD_INS (bblock, ins);
5992 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5993 secman = mono_security_manager_get_methods ();
5995 security = (secman && mono_method_has_declsec (method));
5996 /* at this point having security doesn't mean we have any code to generate */
5997 if (security && (cfg->method == method)) {
5998 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5999 * And we do not want to enter the next section (with allocation) if we
6000 * have nothing to generate */
6001 security = mono_declsec_get_demands (method, &actions);
6004 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6005 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6007 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6008 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6009 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6011 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6012 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6016 mono_custom_attrs_free (custom);
6019 custom = mono_custom_attrs_from_class (wrapped->klass);
6020 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6024 mono_custom_attrs_free (custom);
6027 /* not a P/Invoke after all */
6032 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6033 /* we use a separate basic block for the initialization code */
6034 NEW_BBLOCK (cfg, init_localsbb);
6035 cfg->bb_init = init_localsbb;
6036 init_localsbb->real_offset = cfg->real_offset;
6037 start_bblock->next_bb = init_localsbb;
6038 init_localsbb->next_bb = bblock;
6039 link_bblock (cfg, start_bblock, init_localsbb);
6040 link_bblock (cfg, init_localsbb, bblock);
6042 cfg->cbb = init_localsbb;
6044 start_bblock->next_bb = bblock;
6045 link_bblock (cfg, start_bblock, bblock);
6048 /* at this point we know, if security is TRUE, that some code needs to be generated */
6049 if (security && (cfg->method == method)) {
6052 cfg->stat_cas_demand_generation++;
6054 if (actions.demand.blob) {
6055 /* Add code for SecurityAction.Demand */
6056 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6057 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6058 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6059 mono_emit_method_call (cfg, secman->demand, args, NULL);
6061 if (actions.noncasdemand.blob) {
6062 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6063 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6064 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6065 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6066 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6067 mono_emit_method_call (cfg, secman->demand, args, NULL);
6069 if (actions.demandchoice.blob) {
6070 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6071 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6072 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6073 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6074 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6078 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6080 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6083 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6084 /* check if this is native code, e.g. an icall or a p/invoke */
6085 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6086 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6088 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6089 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6091 /* if this ia a native call then it can only be JITted from platform code */
6092 if ((icall || pinvk) && method->klass && method->klass->image) {
6093 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6094 MonoException *ex = icall ? mono_get_exception_security () :
6095 mono_get_exception_method_access ();
6096 emit_throw_exception (cfg, ex);
6103 if (header->code_size == 0)
6106 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6111 if (cfg->method == method)
6112 mono_debug_init_method (cfg, bblock, breakpoint_id);
6114 for (n = 0; n < header->num_locals; ++n) {
6115 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6120 /* We force the vtable variable here for all shared methods
6121 for the possibility that they might show up in a stack
6122 trace where their exact instantiation is needed. */
6123 if (cfg->generic_sharing_context && method == cfg->method) {
6124 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6125 mini_method_get_context (method)->method_inst ||
6126 method->klass->valuetype) {
6127 mono_get_vtable_var (cfg);
6129 /* FIXME: Is there a better way to do this?
6130 We need the variable live for the duration
6131 of the whole method. */
6132 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6136 /* add a check for this != NULL to inlined methods */
6137 if (is_virtual_call) {
6140 NEW_ARGLOAD (cfg, arg_ins, 0);
6141 MONO_ADD_INS (cfg->cbb, arg_ins);
6142 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6145 skip_dead_blocks = !dont_verify;
6146 if (skip_dead_blocks) {
6147 original_bb = bb = mono_basic_block_split (method, &error);
6148 if (!mono_error_ok (&error)) {
6149 mono_error_cleanup (&error);
6155 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6156 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6159 start_new_bblock = 0;
6162 if (cfg->method == method)
6163 cfg->real_offset = ip - header->code;
6165 cfg->real_offset = inline_offset;
6170 if (start_new_bblock) {
6171 bblock->cil_length = ip - bblock->cil_code;
6172 if (start_new_bblock == 2) {
6173 g_assert (ip == tblock->cil_code);
6175 GET_BBLOCK (cfg, tblock, ip);
6177 bblock->next_bb = tblock;
6180 start_new_bblock = 0;
6181 for (i = 0; i < bblock->in_scount; ++i) {
6182 if (cfg->verbose_level > 3)
6183 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6184 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6188 g_slist_free (class_inits);
6191 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6192 link_bblock (cfg, bblock, tblock);
6193 if (sp != stack_start) {
6194 handle_stack_args (cfg, stack_start, sp - stack_start);
6196 CHECK_UNVERIFIABLE (cfg);
6198 bblock->next_bb = tblock;
6201 for (i = 0; i < bblock->in_scount; ++i) {
6202 if (cfg->verbose_level > 3)
6203 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6204 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6207 g_slist_free (class_inits);
6212 if (skip_dead_blocks) {
6213 int ip_offset = ip - header->code;
6215 if (ip_offset == bb->end)
6219 int op_size = mono_opcode_size (ip, end);
6220 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6222 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6224 if (ip_offset + op_size == bb->end) {
6225 MONO_INST_NEW (cfg, ins, OP_NOP);
6226 MONO_ADD_INS (bblock, ins);
6227 start_new_bblock = 1;
6235 * Sequence points are points where the debugger can place a breakpoint.
6236 * Currently, we generate these automatically at points where the IL
6239 if (seq_points && sp == stack_start) {
6240 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6241 MONO_ADD_INS (cfg->cbb, ins);
6244 bblock->real_offset = cfg->real_offset;
6246 if ((cfg->method == method) && cfg->coverage_info) {
6247 guint32 cil_offset = ip - header->code;
6248 cfg->coverage_info->data [cil_offset].cil_code = ip;
6250 /* TODO: Use an increment here */
6251 #if defined(TARGET_X86)
6252 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6253 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6255 MONO_ADD_INS (cfg->cbb, ins);
6257 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6258 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6262 if (cfg->verbose_level > 3)
6263 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6267 if (cfg->keep_cil_nops)
6268 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6270 MONO_INST_NEW (cfg, ins, OP_NOP);
6272 MONO_ADD_INS (bblock, ins);
6275 if (should_insert_brekpoint (cfg->method)) {
6276 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6278 MONO_INST_NEW (cfg, ins, OP_NOP);
6281 MONO_ADD_INS (bblock, ins);
6287 CHECK_STACK_OVF (1);
6288 n = (*ip)-CEE_LDARG_0;
6290 EMIT_NEW_ARGLOAD (cfg, ins, n);
6298 CHECK_STACK_OVF (1);
6299 n = (*ip)-CEE_LDLOC_0;
6301 EMIT_NEW_LOCLOAD (cfg, ins, n);
6310 n = (*ip)-CEE_STLOC_0;
6313 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6315 emit_stloc_ir (cfg, sp, header, n);
6322 CHECK_STACK_OVF (1);
6325 EMIT_NEW_ARGLOAD (cfg, ins, n);
6331 CHECK_STACK_OVF (1);
6334 NEW_ARGLOADA (cfg, ins, n);
6335 MONO_ADD_INS (cfg->cbb, ins);
6345 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6347 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6352 CHECK_STACK_OVF (1);
6355 EMIT_NEW_LOCLOAD (cfg, ins, n);
6359 case CEE_LDLOCA_S: {
6360 unsigned char *tmp_ip;
6362 CHECK_STACK_OVF (1);
6363 CHECK_LOCAL (ip [1]);
6365 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6371 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6380 CHECK_LOCAL (ip [1]);
6381 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6383 emit_stloc_ir (cfg, sp, header, ip [1]);
6388 CHECK_STACK_OVF (1);
6389 EMIT_NEW_PCONST (cfg, ins, NULL);
6390 ins->type = STACK_OBJ;
6395 CHECK_STACK_OVF (1);
6396 EMIT_NEW_ICONST (cfg, ins, -1);
6409 CHECK_STACK_OVF (1);
6410 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6416 CHECK_STACK_OVF (1);
6418 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6424 CHECK_STACK_OVF (1);
6425 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6431 CHECK_STACK_OVF (1);
6432 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6433 ins->type = STACK_I8;
6434 ins->dreg = alloc_dreg (cfg, STACK_I8);
6436 ins->inst_l = (gint64)read64 (ip);
6437 MONO_ADD_INS (bblock, ins);
6443 gboolean use_aotconst = FALSE;
6445 #ifdef TARGET_POWERPC
6446 /* FIXME: Clean this up */
6447 if (cfg->compile_aot)
6448 use_aotconst = TRUE;
6451 /* FIXME: we should really allocate this only late in the compilation process */
6452 f = mono_domain_alloc (cfg->domain, sizeof (float));
6454 CHECK_STACK_OVF (1);
6460 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6462 dreg = alloc_freg (cfg);
6463 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6464 ins->type = STACK_R8;
6466 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6467 ins->type = STACK_R8;
6468 ins->dreg = alloc_dreg (cfg, STACK_R8);
6470 MONO_ADD_INS (bblock, ins);
6480 gboolean use_aotconst = FALSE;
6482 #ifdef TARGET_POWERPC
6483 /* FIXME: Clean this up */
6484 if (cfg->compile_aot)
6485 use_aotconst = TRUE;
6488 /* FIXME: we should really allocate this only late in the compilation process */
6489 d = mono_domain_alloc (cfg->domain, sizeof (double));
6491 CHECK_STACK_OVF (1);
6497 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6499 dreg = alloc_freg (cfg);
6500 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6501 ins->type = STACK_R8;
6503 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6504 ins->type = STACK_R8;
6505 ins->dreg = alloc_dreg (cfg, STACK_R8);
6507 MONO_ADD_INS (bblock, ins);
6516 MonoInst *temp, *store;
6518 CHECK_STACK_OVF (1);
6522 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6523 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6525 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6528 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6541 if (sp [0]->type == STACK_R8)
6542 /* we need to pop the value from the x86 FP stack */
6543 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6552 if (stack_start != sp)
6554 token = read32 (ip + 1);
6555 /* FIXME: check the signature matches */
6556 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6558 if (!cmethod || mono_loader_get_last_error ())
6561 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6562 GENERIC_SHARING_FAILURE (CEE_JMP);
6564 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6565 CHECK_CFG_EXCEPTION;
6567 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6569 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6572 /* Handle tail calls similarly to calls */
6573 n = fsig->param_count + fsig->hasthis;
6575 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6576 call->method = cmethod;
6577 call->tail_call = TRUE;
6578 call->signature = mono_method_signature (cmethod);
6579 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6580 call->inst.inst_p0 = cmethod;
6581 for (i = 0; i < n; ++i)
6582 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6584 mono_arch_emit_call (cfg, call);
6585 MONO_ADD_INS (bblock, (MonoInst*)call);
6588 for (i = 0; i < num_args; ++i)
6589 /* Prevent arguments from being optimized away */
6590 arg_array [i]->flags |= MONO_INST_VOLATILE;
6592 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6593 ins = (MonoInst*)call;
6594 ins->inst_p0 = cmethod;
6595 MONO_ADD_INS (bblock, ins);
6599 start_new_bblock = 1;
6604 case CEE_CALLVIRT: {
6605 MonoInst *addr = NULL;
6606 MonoMethodSignature *fsig = NULL;
6608 int virtual = *ip == CEE_CALLVIRT;
6609 int calli = *ip == CEE_CALLI;
6610 gboolean pass_imt_from_rgctx = FALSE;
6611 MonoInst *imt_arg = NULL;
6612 gboolean pass_vtable = FALSE;
6613 gboolean pass_mrgctx = FALSE;
6614 MonoInst *vtable_arg = NULL;
6615 gboolean check_this = FALSE;
6616 gboolean supported_tail_call = FALSE;
6619 token = read32 (ip + 1);
6626 if (method->wrapper_type != MONO_WRAPPER_NONE)
6627 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6629 fsig = mono_metadata_parse_signature (image, token);
6631 n = fsig->param_count + fsig->hasthis;
6633 if (method->dynamic && fsig->pinvoke) {
6637 * This is a call through a function pointer using a pinvoke
6638 * signature. Have to create a wrapper and call that instead.
6639 * FIXME: This is very slow, need to create a wrapper at JIT time
6640 * instead based on the signature.
6642 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6643 EMIT_NEW_PCONST (cfg, args [1], fsig);
6645 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6648 MonoMethod *cil_method;
6650 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6651 if (constrained_call && cfg->verbose_level > 2)
6652 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6653 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6654 cil_method = cmethod;
6655 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
6656 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
6657 cfg->generic_sharing_context)) {
6658 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
6660 } else if (constrained_call) {
6661 if (cfg->verbose_level > 2)
6662 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6664 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6666 * This is needed since get_method_constrained can't find
6667 * the method in klass representing a type var.
6668 * The type var is guaranteed to be a reference type in this
6671 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6672 cil_method = cmethod;
6673 g_assert (!cmethod->klass->valuetype);
6675 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6678 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6679 cil_method = cmethod;
6682 if (!cmethod || mono_loader_get_last_error ())
6684 if (!dont_verify && !cfg->skip_visibility) {
6685 MonoMethod *target_method = cil_method;
6686 if (method->is_inflated) {
6687 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6689 if (!mono_method_can_access_method (method_definition, target_method) &&
6690 !mono_method_can_access_method (method, cil_method))
6691 METHOD_ACCESS_FAILURE;
6694 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6695 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6697 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6698 /* MS.NET seems to silently convert this to a callvirt */
6703 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6704 * converts to a callvirt.
6706 * tests/bug-515884.il is an example of this behavior
6708 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6709 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6710 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6714 if (!cmethod->klass->inited)
6715 if (!mono_class_init (cmethod->klass))
6718 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6719 mini_class_is_system_array (cmethod->klass)) {
6720 array_rank = cmethod->klass->rank;
6721 fsig = mono_method_signature (cmethod);
6723 fsig = mono_method_signature (cmethod);
6728 if (fsig->pinvoke) {
6729 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6730 check_for_pending_exc, FALSE);
6731 fsig = mono_method_signature (wrapper);
6732 } else if (constrained_call) {
6733 fsig = mono_method_signature (cmethod);
6735 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6739 mono_save_token_info (cfg, image, token, cil_method);
6741 n = fsig->param_count + fsig->hasthis;
6743 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6744 if (check_linkdemand (cfg, method, cmethod))
6746 CHECK_CFG_EXCEPTION;
6749 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6750 g_assert_not_reached ();
6753 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6756 if (!cfg->generic_sharing_context && cmethod)
6757 g_assert (!mono_method_check_context_used (cmethod));
6761 //g_assert (!virtual || fsig->hasthis);
6765 if (constrained_call) {
6767 * We have the `constrained.' prefix opcode.
6769 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6771 * The type parameter is instantiated as a valuetype,
6772 * but that type doesn't override the method we're
6773 * calling, so we need to box `this'.
6775 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6776 ins->klass = constrained_call;
6777 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6778 CHECK_CFG_EXCEPTION;
6779 } else if (!constrained_call->valuetype) {
6780 int dreg = alloc_ireg_ref (cfg);
6783 * The type parameter is instantiated as a reference
6784 * type. We have a managed pointer on the stack, so
6785 * we need to dereference it here.
6787 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6788 ins->type = STACK_OBJ;
6790 } else if (cmethod->klass->valuetype)
6792 constrained_call = NULL;
6795 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6799 * If the callee is a shared method, then its static cctor
6800 * might not get called after the call was patched.
6802 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6803 emit_generic_class_init (cfg, cmethod->klass);
6804 CHECK_TYPELOAD (cmethod->klass);
6807 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6808 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6809 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6810 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6811 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6814 * Pass vtable iff target method might
6815 * be shared, which means that sharing
6816 * is enabled for its class and its
6817 * context is sharable (and it's not a
6820 if (sharing_enabled && context_sharable &&
6821 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6825 if (cmethod && mini_method_get_context (cmethod) &&
6826 mini_method_get_context (cmethod)->method_inst) {
6827 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6828 MonoGenericContext *context = mini_method_get_context (cmethod);
6829 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6831 g_assert (!pass_vtable);
6833 if (sharing_enabled && context_sharable)
6837 if (cfg->generic_sharing_context && cmethod) {
6838 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6840 context_used = mono_method_check_context_used (cmethod);
6842 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6843 /* Generic method interface
6844 calls are resolved via a
6845 helper function and don't
6847 if (!cmethod_context || !cmethod_context->method_inst)
6848 pass_imt_from_rgctx = TRUE;
6852 * If a shared method calls another
6853 * shared method then the caller must
6854 * have a generic sharing context
6855 * because the magic trampoline
6856 * requires it. FIXME: We shouldn't
6857 * have to force the vtable/mrgctx
6858 * variable here. Instead there
6859 * should be a flag in the cfg to
6860 * request a generic sharing context.
6863 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6864 mono_get_vtable_var (cfg);
6869 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6871 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6873 CHECK_TYPELOAD (cmethod->klass);
6874 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6879 g_assert (!vtable_arg);
6881 if (!cfg->compile_aot) {
6883 * emit_get_rgctx_method () calls mono_class_vtable () so check
6884 * for type load errors before.
6886 mono_class_setup_vtable (cmethod->klass);
6887 CHECK_TYPELOAD (cmethod->klass);
6890 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6892 /* !marshalbyref is needed to properly handle generic methods + remoting */
6893 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6894 MONO_METHOD_IS_FINAL (cmethod)) &&
6895 !cmethod->klass->marshalbyref) {
6902 if (pass_imt_from_rgctx) {
6903 g_assert (!pass_vtable);
6906 imt_arg = emit_get_rgctx_method (cfg, context_used,
6907 cmethod, MONO_RGCTX_INFO_METHOD);
6911 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6913 /* Calling virtual generic methods */
6914 if (cmethod && virtual &&
6915 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6916 !(MONO_METHOD_IS_FINAL (cmethod) &&
6917 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6918 mono_method_signature (cmethod)->generic_param_count) {
6919 MonoInst *this_temp, *this_arg_temp, *store;
6920 MonoInst *iargs [4];
6922 g_assert (mono_method_signature (cmethod)->is_inflated);
6924 /* Prevent inlining of methods that contain indirect calls */
6927 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6928 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6929 g_assert (!imt_arg);
6931 g_assert (cmethod->is_inflated);
6932 imt_arg = emit_get_rgctx_method (cfg, context_used,
6933 cmethod, MONO_RGCTX_INFO_METHOD);
6934 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6938 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6939 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6940 MONO_ADD_INS (bblock, store);
6942 /* FIXME: This should be a managed pointer */
6943 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6945 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6946 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6947 cmethod, MONO_RGCTX_INFO_METHOD);
6948 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6949 addr = mono_emit_jit_icall (cfg,
6950 mono_helper_compile_generic_method, iargs);
6952 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6954 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6957 if (!MONO_TYPE_IS_VOID (fsig->ret))
6958 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6960 CHECK_CFG_EXCEPTION;
6968 * Implement a workaround for the inherent races involved in locking:
6974 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6975 * try block, the Exit () won't be executed, see:
6976 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6977 * To work around this, we extend such try blocks to include the last x bytes
6978 * of the Monitor.Enter () call.
6980 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6981 MonoBasicBlock *tbb;
6983 GET_BBLOCK (cfg, tbb, ip + 5);
6985 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6986 * from Monitor.Enter like ArgumentNullException.
6988 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6989 /* Mark this bblock as needing to be extended */
6990 tbb->extend_try_block = TRUE;
6994 /* Conversion to a JIT intrinsic */
6995 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6997 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6998 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7003 CHECK_CFG_EXCEPTION;
7011 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7012 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7013 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7014 !g_list_find (dont_inline, cmethod)) {
7016 gboolean always = FALSE;
7018 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7019 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7020 /* Prevent inlining of methods that call wrappers */
7022 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7026 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
7028 cfg->real_offset += 5;
7031 if (!MONO_TYPE_IS_VOID (fsig->ret))
7032 /* *sp is already set by inline_method */
7035 inline_costs += costs;
7041 inline_costs += 10 * num_calls++;
7043 /* Tail recursion elimination */
7044 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7045 gboolean has_vtargs = FALSE;
7048 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7051 /* keep it simple */
7052 for (i = fsig->param_count - 1; i >= 0; i--) {
7053 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7058 for (i = 0; i < n; ++i)
7059 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7060 MONO_INST_NEW (cfg, ins, OP_BR);
7061 MONO_ADD_INS (bblock, ins);
7062 tblock = start_bblock->out_bb [0];
7063 link_bblock (cfg, bblock, tblock);
7064 ins->inst_target_bb = tblock;
7065 start_new_bblock = 1;
7067 /* skip the CEE_RET, too */
7068 if (ip_in_bb (cfg, bblock, ip + 5))
7078 /* Generic sharing */
7079 /* FIXME: only do this for generic methods if
7080 they are not shared! */
7081 if (context_used && !imt_arg && !array_rank &&
7082 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7083 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7084 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7085 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7088 g_assert (cfg->generic_sharing_context && cmethod);
7092 * We are compiling a call to a
7093 * generic method from shared code,
7094 * which means that we have to look up
7095 * the method in the rgctx and do an
7098 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7101 /* Indirect calls */
7103 g_assert (!imt_arg);
7105 if (*ip == CEE_CALL)
7106 g_assert (context_used);
7107 else if (*ip == CEE_CALLI)
7108 g_assert (!vtable_arg);
7110 /* FIXME: what the hell is this??? */
7111 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7112 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7114 /* Prevent inlining of methods with indirect calls */
7118 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7120 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7122 * Instead of emitting an indirect call, emit a direct call
7123 * with the contents of the aotconst as the patch info.
7125 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7127 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7128 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7131 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7134 if (!MONO_TYPE_IS_VOID (fsig->ret))
7135 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7137 CHECK_CFG_EXCEPTION;
7148 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7149 MonoInst *val = sp [fsig->param_count];
7151 if (val->type == STACK_OBJ) {
7152 MonoInst *iargs [2];
7157 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7160 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7161 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7162 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7163 emit_write_barrier (cfg, addr, val, 0);
7164 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7165 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7167 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7170 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7171 if (!cmethod->klass->element_class->valuetype && !readonly)
7172 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7173 CHECK_TYPELOAD (cmethod->klass);
7176 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7179 g_assert_not_reached ();
7182 CHECK_CFG_EXCEPTION;
7189 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7191 if (!MONO_TYPE_IS_VOID (fsig->ret))
7192 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7194 CHECK_CFG_EXCEPTION;
7201 /* Tail prefix / tail call optimization */
7203 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7204 /* FIXME: runtime generic context pointer for jumps? */
7205 /* FIXME: handle this for generic sharing eventually */
7206 supported_tail_call = cmethod &&
7207 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7208 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7209 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7211 if (supported_tail_call) {
7214 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7217 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7219 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7220 /* Handle tail calls similarly to calls */
7221 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7223 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7224 call->tail_call = TRUE;
7225 call->method = cmethod;
7226 call->signature = mono_method_signature (cmethod);
7229 * We implement tail calls by storing the actual arguments into the
7230 * argument variables, then emitting a CEE_JMP.
7232 for (i = 0; i < n; ++i) {
7233 /* Prevent argument from being register allocated */
7234 arg_array [i]->flags |= MONO_INST_VOLATILE;
7235 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7239 ins = (MonoInst*)call;
7240 ins->inst_p0 = cmethod;
7241 ins->inst_p1 = arg_array [0];
7242 MONO_ADD_INS (bblock, ins);
7243 link_bblock (cfg, bblock, end_bblock);
7244 start_new_bblock = 1;
7246 CHECK_CFG_EXCEPTION;
7251 // FIXME: Eliminate unreachable epilogs
7254 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7255 * only reachable from this call.
7257 GET_BBLOCK (cfg, tblock, ip);
7258 if (tblock == bblock || tblock->in_count == 0)
7265 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7266 imt_arg, vtable_arg);
7268 if (!MONO_TYPE_IS_VOID (fsig->ret))
7269 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7271 CHECK_CFG_EXCEPTION;
7278 if (cfg->method != method) {
7279 /* return from inlined method */
7281 * If in_count == 0, that means the ret is unreachable due to
7282 * being preceded by a throw. In that case, inline_method () will
7283 * handle setting the return value
7284 * (test case: test_0_inline_throw ()).
7286 if (return_var && cfg->cbb->in_count) {
7290 //g_assert (returnvar != -1);
7291 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7292 cfg->ret_var_set = TRUE;
7296 MonoType *ret_type = mono_method_signature (method)->ret;
7300 * Place a seq point here too even though the IL stack is not
7301 * empty, so a step over on
7304 * will work correctly.
7306 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7307 MONO_ADD_INS (cfg->cbb, ins);
7310 g_assert (!return_var);
7314 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7317 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7320 if (!cfg->vret_addr) {
7323 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7325 EMIT_NEW_RETLOADA (cfg, ret_addr);
7327 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7328 ins->klass = mono_class_from_mono_type (ret_type);
7331 #ifdef MONO_ARCH_SOFT_FLOAT
7332 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7333 MonoInst *iargs [1];
7337 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7338 mono_arch_emit_setret (cfg, method, conv);
7340 mono_arch_emit_setret (cfg, method, *sp);
7343 mono_arch_emit_setret (cfg, method, *sp);
7348 if (sp != stack_start)
7350 MONO_INST_NEW (cfg, ins, OP_BR);
7352 ins->inst_target_bb = end_bblock;
7353 MONO_ADD_INS (bblock, ins);
7354 link_bblock (cfg, bblock, end_bblock);
7355 start_new_bblock = 1;
7359 MONO_INST_NEW (cfg, ins, OP_BR);
7361 target = ip + 1 + (signed char)(*ip);
7363 GET_BBLOCK (cfg, tblock, target);
7364 link_bblock (cfg, bblock, tblock);
7365 ins->inst_target_bb = tblock;
7366 if (sp != stack_start) {
7367 handle_stack_args (cfg, stack_start, sp - stack_start);
7369 CHECK_UNVERIFIABLE (cfg);
7371 MONO_ADD_INS (bblock, ins);
7372 start_new_bblock = 1;
7373 inline_costs += BRANCH_COST;
7387 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7389 target = ip + 1 + *(signed char*)ip;
7395 inline_costs += BRANCH_COST;
7399 MONO_INST_NEW (cfg, ins, OP_BR);
7402 target = ip + 4 + (gint32)read32(ip);
7404 GET_BBLOCK (cfg, tblock, target);
7405 link_bblock (cfg, bblock, tblock);
7406 ins->inst_target_bb = tblock;
7407 if (sp != stack_start) {
7408 handle_stack_args (cfg, stack_start, sp - stack_start);
7410 CHECK_UNVERIFIABLE (cfg);
7413 MONO_ADD_INS (bblock, ins);
7415 start_new_bblock = 1;
7416 inline_costs += BRANCH_COST;
7423 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7424 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7425 guint32 opsize = is_short ? 1 : 4;
7427 CHECK_OPSIZE (opsize);
7429 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7432 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7437 GET_BBLOCK (cfg, tblock, target);
7438 link_bblock (cfg, bblock, tblock);
7439 GET_BBLOCK (cfg, tblock, ip);
7440 link_bblock (cfg, bblock, tblock);
7442 if (sp != stack_start) {
7443 handle_stack_args (cfg, stack_start, sp - stack_start);
7444 CHECK_UNVERIFIABLE (cfg);
7447 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7448 cmp->sreg1 = sp [0]->dreg;
7449 type_from_op (cmp, sp [0], NULL);
7452 #if SIZEOF_REGISTER == 4
7453 if (cmp->opcode == OP_LCOMPARE_IMM) {
7454 /* Convert it to OP_LCOMPARE */
7455 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7456 ins->type = STACK_I8;
7457 ins->dreg = alloc_dreg (cfg, STACK_I8);
7459 MONO_ADD_INS (bblock, ins);
7460 cmp->opcode = OP_LCOMPARE;
7461 cmp->sreg2 = ins->dreg;
7464 MONO_ADD_INS (bblock, cmp);
7466 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7467 type_from_op (ins, sp [0], NULL);
7468 MONO_ADD_INS (bblock, ins);
7469 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7470 GET_BBLOCK (cfg, tblock, target);
7471 ins->inst_true_bb = tblock;
7472 GET_BBLOCK (cfg, tblock, ip);
7473 ins->inst_false_bb = tblock;
7474 start_new_bblock = 2;
7477 inline_costs += BRANCH_COST;
7492 MONO_INST_NEW (cfg, ins, *ip);
7494 target = ip + 4 + (gint32)read32(ip);
7500 inline_costs += BRANCH_COST;
7504 MonoBasicBlock **targets;
7505 MonoBasicBlock *default_bblock;
7506 MonoJumpInfoBBTable *table;
7507 int offset_reg = alloc_preg (cfg);
7508 int target_reg = alloc_preg (cfg);
7509 int table_reg = alloc_preg (cfg);
7510 int sum_reg = alloc_preg (cfg);
7511 gboolean use_op_switch;
7515 n = read32 (ip + 1);
7518 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7522 CHECK_OPSIZE (n * sizeof (guint32));
7523 target = ip + n * sizeof (guint32);
7525 GET_BBLOCK (cfg, default_bblock, target);
7526 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7528 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7529 for (i = 0; i < n; ++i) {
7530 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7531 targets [i] = tblock;
7532 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7536 if (sp != stack_start) {
7538 * Link the current bb with the targets as well, so handle_stack_args
7539 * will set their in_stack correctly.
7541 link_bblock (cfg, bblock, default_bblock);
7542 for (i = 0; i < n; ++i)
7543 link_bblock (cfg, bblock, targets [i]);
7545 handle_stack_args (cfg, stack_start, sp - stack_start);
7547 CHECK_UNVERIFIABLE (cfg);
7550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7551 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7554 for (i = 0; i < n; ++i)
7555 link_bblock (cfg, bblock, targets [i]);
7557 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7558 table->table = targets;
7559 table->table_size = n;
7561 use_op_switch = FALSE;
7563 /* ARM implements SWITCH statements differently */
7564 /* FIXME: Make it use the generic implementation */
7565 if (!cfg->compile_aot)
7566 use_op_switch = TRUE;
7569 if (COMPILE_LLVM (cfg))
7570 use_op_switch = TRUE;
7572 cfg->cbb->has_jump_table = 1;
7574 if (use_op_switch) {
7575 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7576 ins->sreg1 = src1->dreg;
7577 ins->inst_p0 = table;
7578 ins->inst_many_bb = targets;
7579 ins->klass = GUINT_TO_POINTER (n);
7580 MONO_ADD_INS (cfg->cbb, ins);
7582 if (sizeof (gpointer) == 8)
7583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7585 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7587 #if SIZEOF_REGISTER == 8
7588 /* The upper word might not be zero, and we add it to a 64 bit address later */
7589 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7592 if (cfg->compile_aot) {
7593 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7595 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7596 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7597 ins->inst_p0 = table;
7598 ins->dreg = table_reg;
7599 MONO_ADD_INS (cfg->cbb, ins);
7602 /* FIXME: Use load_memindex */
7603 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7604 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7605 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7607 start_new_bblock = 1;
7608 inline_costs += (BRANCH_COST * 2);
7628 dreg = alloc_freg (cfg);
7631 dreg = alloc_lreg (cfg);
7634 dreg = alloc_ireg_ref (cfg);
7637 dreg = alloc_preg (cfg);
7640 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7641 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7642 ins->flags |= ins_flag;
7644 MONO_ADD_INS (bblock, ins);
7646 if (ins->flags & MONO_INST_VOLATILE) {
7647 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
7648 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7649 emit_memory_barrier (cfg, FullBarrier);
7664 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7665 ins->flags |= ins_flag;
7668 if (ins->flags & MONO_INST_VOLATILE) {
7669 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
7670 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
7671 emit_memory_barrier (cfg, FullBarrier);
7674 MONO_ADD_INS (bblock, ins);
7676 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7677 emit_write_barrier (cfg, sp [0], sp [1], -1);
7686 MONO_INST_NEW (cfg, ins, (*ip));
7688 ins->sreg1 = sp [0]->dreg;
7689 ins->sreg2 = sp [1]->dreg;
7690 type_from_op (ins, sp [0], sp [1]);
7692 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7694 /* Use the immediate opcodes if possible */
7695 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7696 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7697 if (imm_opcode != -1) {
7698 ins->opcode = imm_opcode;
7699 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7702 sp [1]->opcode = OP_NOP;
7706 MONO_ADD_INS ((cfg)->cbb, (ins));
7708 *sp++ = mono_decompose_opcode (cfg, ins);
7725 MONO_INST_NEW (cfg, ins, (*ip));
7727 ins->sreg1 = sp [0]->dreg;
7728 ins->sreg2 = sp [1]->dreg;
7729 type_from_op (ins, sp [0], sp [1]);
7731 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7732 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7734 /* FIXME: Pass opcode to is_inst_imm */
7736 /* Use the immediate opcodes if possible */
7737 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7740 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7741 if (imm_opcode != -1) {
7742 ins->opcode = imm_opcode;
7743 if (sp [1]->opcode == OP_I8CONST) {
7744 #if SIZEOF_REGISTER == 8
7745 ins->inst_imm = sp [1]->inst_l;
7747 ins->inst_ls_word = sp [1]->inst_ls_word;
7748 ins->inst_ms_word = sp [1]->inst_ms_word;
7752 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7755 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7756 if (sp [1]->next == NULL)
7757 sp [1]->opcode = OP_NOP;
7760 MONO_ADD_INS ((cfg)->cbb, (ins));
7762 *sp++ = mono_decompose_opcode (cfg, ins);
7775 case CEE_CONV_OVF_I8:
7776 case CEE_CONV_OVF_U8:
7780 /* Special case this earlier so we have long constants in the IR */
7781 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7782 int data = sp [-1]->inst_c0;
7783 sp [-1]->opcode = OP_I8CONST;
7784 sp [-1]->type = STACK_I8;
7785 #if SIZEOF_REGISTER == 8
7786 if ((*ip) == CEE_CONV_U8)
7787 sp [-1]->inst_c0 = (guint32)data;
7789 sp [-1]->inst_c0 = data;
7791 sp [-1]->inst_ls_word = data;
7792 if ((*ip) == CEE_CONV_U8)
7793 sp [-1]->inst_ms_word = 0;
7795 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7797 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7804 case CEE_CONV_OVF_I4:
7805 case CEE_CONV_OVF_I1:
7806 case CEE_CONV_OVF_I2:
7807 case CEE_CONV_OVF_I:
7808 case CEE_CONV_OVF_U:
7811 if (sp [-1]->type == STACK_R8) {
7812 ADD_UNOP (CEE_CONV_OVF_I8);
7819 case CEE_CONV_OVF_U1:
7820 case CEE_CONV_OVF_U2:
7821 case CEE_CONV_OVF_U4:
7824 if (sp [-1]->type == STACK_R8) {
7825 ADD_UNOP (CEE_CONV_OVF_U8);
7832 case CEE_CONV_OVF_I1_UN:
7833 case CEE_CONV_OVF_I2_UN:
7834 case CEE_CONV_OVF_I4_UN:
7835 case CEE_CONV_OVF_I8_UN:
7836 case CEE_CONV_OVF_U1_UN:
7837 case CEE_CONV_OVF_U2_UN:
7838 case CEE_CONV_OVF_U4_UN:
7839 case CEE_CONV_OVF_U8_UN:
7840 case CEE_CONV_OVF_I_UN:
7841 case CEE_CONV_OVF_U_UN:
7848 CHECK_CFG_EXCEPTION;
7852 case CEE_ADD_OVF_UN:
7854 case CEE_MUL_OVF_UN:
7856 case CEE_SUB_OVF_UN:
7864 token = read32 (ip + 1);
7865 klass = mini_get_class (method, token, generic_context);
7866 CHECK_TYPELOAD (klass);
7868 if (generic_class_is_reference_type (cfg, klass)) {
7869 MonoInst *store, *load;
7870 int dreg = alloc_ireg_ref (cfg);
7872 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7873 load->flags |= ins_flag;
7874 MONO_ADD_INS (cfg->cbb, load);
7876 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7877 store->flags |= ins_flag;
7878 MONO_ADD_INS (cfg->cbb, store);
7880 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7881 emit_write_barrier (cfg, sp [0], sp [1], -1);
7883 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7895 token = read32 (ip + 1);
7896 klass = mini_get_class (method, token, generic_context);
7897 CHECK_TYPELOAD (klass);
7899 /* Optimize the common ldobj+stloc combination */
7909 loc_index = ip [5] - CEE_STLOC_0;
7916 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7917 CHECK_LOCAL (loc_index);
7919 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7920 ins->dreg = cfg->locals [loc_index]->dreg;
7926 /* Optimize the ldobj+stobj combination */
7927 /* The reference case ends up being a load+store anyway */
7928 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7933 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7940 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7949 CHECK_STACK_OVF (1);
7951 n = read32 (ip + 1);
7953 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7954 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7955 ins->type = STACK_OBJ;
7958 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7959 MonoInst *iargs [1];
7961 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7962 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7964 if (cfg->opt & MONO_OPT_SHARED) {
7965 MonoInst *iargs [3];
7967 if (cfg->compile_aot) {
7968 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7970 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7971 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7972 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7973 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7974 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7976 if (bblock->out_of_line) {
7977 MonoInst *iargs [2];
7979 if (image == mono_defaults.corlib) {
7981 * Avoid relocations in AOT and save some space by using a
7982 * version of helper_ldstr specialized to mscorlib.
7984 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7985 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7987 /* Avoid creating the string object */
7988 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7989 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7990 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7994 if (cfg->compile_aot) {
7995 NEW_LDSTRCONST (cfg, ins, image, n);
7997 MONO_ADD_INS (bblock, ins);
8000 NEW_PCONST (cfg, ins, NULL);
8001 ins->type = STACK_OBJ;
8002 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8004 OUT_OF_MEMORY_FAILURE;
8007 MONO_ADD_INS (bblock, ins);
8016 MonoInst *iargs [2];
8017 MonoMethodSignature *fsig;
8020 MonoInst *vtable_arg = NULL;
8023 token = read32 (ip + 1);
8024 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8025 if (!cmethod || mono_loader_get_last_error ())
8027 fsig = mono_method_get_signature (cmethod, image, token);
8031 mono_save_token_info (cfg, image, token, cmethod);
8033 if (!mono_class_init (cmethod->klass))
8036 if (cfg->generic_sharing_context)
8037 context_used = mono_method_check_context_used (cmethod);
8039 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8040 if (check_linkdemand (cfg, method, cmethod))
8042 CHECK_CFG_EXCEPTION;
8043 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8044 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8047 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8048 emit_generic_class_init (cfg, cmethod->klass);
8049 CHECK_TYPELOAD (cmethod->klass);
8052 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8053 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8054 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8055 mono_class_vtable (cfg->domain, cmethod->klass);
8056 CHECK_TYPELOAD (cmethod->klass);
8058 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8059 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8062 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8063 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8065 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8067 CHECK_TYPELOAD (cmethod->klass);
8068 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8073 n = fsig->param_count;
8077 * Generate smaller code for the common newobj <exception> instruction in
8078 * argument checking code.
8080 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8081 is_exception_class (cmethod->klass) && n <= 2 &&
8082 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8083 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8084 MonoInst *iargs [3];
8086 g_assert (!vtable_arg);
8090 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8093 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8097 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8102 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8105 g_assert_not_reached ();
8113 /* move the args to allow room for 'this' in the first position */
8119 /* check_call_signature () requires sp[0] to be set */
8120 this_ins.type = STACK_OBJ;
8122 if (check_call_signature (cfg, fsig, sp))
8127 if (mini_class_is_system_array (cmethod->klass)) {
8128 g_assert (!vtable_arg);
8130 *sp = emit_get_rgctx_method (cfg, context_used,
8131 cmethod, MONO_RGCTX_INFO_METHOD);
8133 /* Avoid varargs in the common case */
8134 if (fsig->param_count == 1)
8135 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8136 else if (fsig->param_count == 2)
8137 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8138 else if (fsig->param_count == 3)
8139 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8141 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8142 } else if (cmethod->string_ctor) {
8143 g_assert (!context_used);
8144 g_assert (!vtable_arg);
8145 /* we simply pass a null pointer */
8146 EMIT_NEW_PCONST (cfg, *sp, NULL);
8147 /* now call the string ctor */
8148 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8150 MonoInst* callvirt_this_arg = NULL;
8152 if (cmethod->klass->valuetype) {
8153 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8154 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8155 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8160 * The code generated by mini_emit_virtual_call () expects
8161 * iargs [0] to be a boxed instance, but luckily the vcall
8162 * will be transformed into a normal call there.
8164 } else if (context_used) {
8165 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8168 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8170 CHECK_TYPELOAD (cmethod->klass);
8173 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8174 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8175 * As a workaround, we call class cctors before allocating objects.
8177 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8178 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8179 if (cfg->verbose_level > 2)
8180 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8181 class_inits = g_slist_prepend (class_inits, vtable);
8184 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8187 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8190 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8192 /* Now call the actual ctor */
8193 /* Avoid virtual calls to ctors if possible */
8194 if (cmethod->klass->marshalbyref)
8195 callvirt_this_arg = sp [0];
8198 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8199 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8200 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8205 CHECK_CFG_EXCEPTION;
8206 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8207 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8208 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8209 !g_list_find (dont_inline, cmethod)) {
8212 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8213 cfg->real_offset += 5;
8216 inline_costs += costs - 5;
8219 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8221 } else if (context_used &&
8222 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8223 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8224 MonoInst *cmethod_addr;
8226 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8227 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8229 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8232 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8233 callvirt_this_arg, NULL, vtable_arg);
8237 if (alloc == NULL) {
8239 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8240 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8254 token = read32 (ip + 1);
8255 klass = mini_get_class (method, token, generic_context);
8256 CHECK_TYPELOAD (klass);
8257 if (sp [0]->type != STACK_OBJ)
8260 if (cfg->generic_sharing_context)
8261 context_used = mono_class_check_context_used (klass);
8263 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8264 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8271 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8274 if (cfg->compile_aot)
8275 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8277 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8279 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8280 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8283 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8284 MonoMethod *mono_castclass;
8285 MonoInst *iargs [1];
8288 mono_castclass = mono_marshal_get_castclass (klass);
8291 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8292 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8293 CHECK_CFG_EXCEPTION;
8294 g_assert (costs > 0);
8297 cfg->real_offset += 5;
8302 inline_costs += costs;
8305 ins = handle_castclass (cfg, klass, *sp, context_used);
8306 CHECK_CFG_EXCEPTION;
8316 token = read32 (ip + 1);
8317 klass = mini_get_class (method, token, generic_context);
8318 CHECK_TYPELOAD (klass);
8319 if (sp [0]->type != STACK_OBJ)
8322 if (cfg->generic_sharing_context)
8323 context_used = mono_class_check_context_used (klass);
8325 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8326 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8333 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8336 if (cfg->compile_aot)
8337 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8339 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8341 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8344 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8345 MonoMethod *mono_isinst;
8346 MonoInst *iargs [1];
8349 mono_isinst = mono_marshal_get_isinst (klass);
8352 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8353 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8354 CHECK_CFG_EXCEPTION;
8355 g_assert (costs > 0);
8358 cfg->real_offset += 5;
8363 inline_costs += costs;
8366 ins = handle_isinst (cfg, klass, *sp, context_used);
8367 CHECK_CFG_EXCEPTION;
8374 case CEE_UNBOX_ANY: {
8378 token = read32 (ip + 1);
8379 klass = mini_get_class (method, token, generic_context);
8380 CHECK_TYPELOAD (klass);
8382 mono_save_token_info (cfg, image, token, klass);
8384 if (cfg->generic_sharing_context)
8385 context_used = mono_class_check_context_used (klass);
8387 if (generic_class_is_reference_type (cfg, klass)) {
8388 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8389 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8390 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8397 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8400 /*FIXME AOT support*/
8401 if (cfg->compile_aot)
8402 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8404 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8406 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8407 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8410 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8411 MonoMethod *mono_castclass;
8412 MonoInst *iargs [1];
8415 mono_castclass = mono_marshal_get_castclass (klass);
8418 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8419 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8420 CHECK_CFG_EXCEPTION;
8421 g_assert (costs > 0);
8424 cfg->real_offset += 5;
8428 inline_costs += costs;
8430 ins = handle_castclass (cfg, klass, *sp, context_used);
8431 CHECK_CFG_EXCEPTION;
8439 if (mono_class_is_nullable (klass)) {
8440 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8447 ins = handle_unbox (cfg, klass, sp, context_used);
8453 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8466 token = read32 (ip + 1);
8467 klass = mini_get_class (method, token, generic_context);
8468 CHECK_TYPELOAD (klass);
8470 mono_save_token_info (cfg, image, token, klass);
8472 if (cfg->generic_sharing_context)
8473 context_used = mono_class_check_context_used (klass);
8475 if (generic_class_is_reference_type (cfg, klass)) {
8481 if (klass == mono_defaults.void_class)
8483 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8485 /* frequent check in generic code: box (struct), brtrue */
8487 // FIXME: LLVM can't handle the inconsistent bb linking
8488 if (!mono_class_is_nullable (klass) &&
8489 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8490 (ip [5] == CEE_BRTRUE ||
8491 ip [5] == CEE_BRTRUE_S ||
8492 ip [5] == CEE_BRFALSE ||
8493 ip [5] == CEE_BRFALSE_S)) {
8494 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8496 MonoBasicBlock *true_bb, *false_bb;
8500 if (cfg->verbose_level > 3) {
8501 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8502 printf ("<box+brtrue opt>\n");
8510 target = ip + 1 + (signed char)(*ip);
8517 target = ip + 4 + (gint)(read32 (ip));
8521 g_assert_not_reached ();
8525 * We need to link both bblocks, since it is needed for handling stack
8526 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8527 * Branching to only one of them would lead to inconsistencies, so
8528 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8530 GET_BBLOCK (cfg, true_bb, target);
8531 GET_BBLOCK (cfg, false_bb, ip);
8533 mono_link_bblock (cfg, cfg->cbb, true_bb);
8534 mono_link_bblock (cfg, cfg->cbb, false_bb);
8536 if (sp != stack_start) {
8537 handle_stack_args (cfg, stack_start, sp - stack_start);
8539 CHECK_UNVERIFIABLE (cfg);
8542 if (COMPILE_LLVM (cfg)) {
8543 dreg = alloc_ireg (cfg);
8544 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8547 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8549 /* The JIT can't eliminate the iconst+compare */
8550 MONO_INST_NEW (cfg, ins, OP_BR);
8551 ins->inst_target_bb = is_true ? true_bb : false_bb;
8552 MONO_ADD_INS (cfg->cbb, ins);
8555 start_new_bblock = 1;
8559 *sp++ = handle_box (cfg, val, klass, context_used);
8561 CHECK_CFG_EXCEPTION;
8570 token = read32 (ip + 1);
8571 klass = mini_get_class (method, token, generic_context);
8572 CHECK_TYPELOAD (klass);
8574 mono_save_token_info (cfg, image, token, klass);
8576 if (cfg->generic_sharing_context)
8577 context_used = mono_class_check_context_used (klass);
8579 if (mono_class_is_nullable (klass)) {
8582 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8583 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8587 ins = handle_unbox (cfg, klass, sp, context_used);
8600 MonoClassField *field;
8603 gboolean is_instance;
8605 gpointer addr = NULL;
8606 gboolean is_special_static;
8608 MonoInst *store_val = NULL;
8611 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
8613 if (op == CEE_STFLD) {
8621 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8623 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8626 if (op == CEE_STSFLD) {
8634 token = read32 (ip + 1);
8635 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8636 field = mono_method_get_wrapper_data (method, token);
8637 klass = field->parent;
8640 field = mono_field_from_token (image, token, &klass, generic_context);
8644 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8645 FIELD_ACCESS_FAILURE;
8646 mono_class_init (klass);
8648 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
8651 /* if the class is Critical then transparent code cannot access its fields */
8652 if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8653 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8655 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8656 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8657 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8658 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8662 * LDFLD etc. is usable on static fields as well, so convert those cases to
8665 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
8677 g_assert_not_reached ();
8679 is_instance = FALSE;
8684 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8685 if (op == CEE_STFLD) {
8686 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8688 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8689 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8690 MonoInst *iargs [5];
8693 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8694 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8695 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8699 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8700 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8701 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8702 CHECK_CFG_EXCEPTION;
8703 g_assert (costs > 0);
8705 cfg->real_offset += 5;
8708 inline_costs += costs;
8710 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8715 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8717 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8718 if (sp [0]->opcode != OP_LDADDR)
8719 store->flags |= MONO_INST_FAULT;
8721 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8722 /* insert call to write barrier */
8726 dreg = alloc_ireg_mp (cfg);
8727 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8728 emit_write_barrier (cfg, ptr, sp [1], -1);
8731 store->flags |= ins_flag;
8738 if (is_instance && ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class)) {
8739 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8740 MonoInst *iargs [4];
8743 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8744 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8745 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8746 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8747 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8748 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8749 CHECK_CFG_EXCEPTION;
8751 g_assert (costs > 0);
8753 cfg->real_offset += 5;
8757 inline_costs += costs;
8759 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8762 } else if (is_instance) {
8763 if (sp [0]->type == STACK_VTYPE) {
8766 /* Have to compute the address of the variable */
8768 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8770 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8772 g_assert (var->klass == klass);
8774 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8778 if (op == CEE_LDFLDA) {
8779 if (is_magic_tls_access (field)) {
8781 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
8783 if (sp [0]->type == STACK_OBJ) {
8784 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8785 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8788 dreg = alloc_ireg_mp (cfg);
8790 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8791 ins->klass = mono_class_from_mono_type (field->type);
8792 ins->type = STACK_MP;
8798 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8800 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8801 load->flags |= ins_flag;
8802 if (sp [0]->opcode != OP_LDADDR)
8803 load->flags |= MONO_INST_FAULT;
8817 * We can only support shared generic static
8818 * field access on architectures where the
8819 * trampoline code has been extended to handle
8820 * the generic class init.
8822 #ifndef MONO_ARCH_VTABLE_REG
8823 GENERIC_SHARING_FAILURE (op);
8826 if (cfg->generic_sharing_context)
8827 context_used = mono_class_check_context_used (klass);
8829 ftype = mono_field_get_type (field);
8831 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8833 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8834 * to be called here.
8836 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8837 mono_class_vtable (cfg->domain, klass);
8838 CHECK_TYPELOAD (klass);
8840 mono_domain_lock (cfg->domain);
8841 if (cfg->domain->special_static_fields)
8842 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8843 mono_domain_unlock (cfg->domain);
8845 is_special_static = mono_class_field_is_special_static (field);
8847 /* Generate IR to compute the field address */
8848 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8850 * Fast access to TLS data
8851 * Inline version of get_thread_static_data () in
8855 int idx, static_data_reg, array_reg, dreg;
8856 MonoInst *thread_ins;
8858 // offset &= 0x7fffffff;
8859 // idx = (offset >> 24) - 1;
8860 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8862 thread_ins = mono_get_thread_intrinsic (cfg);
8863 MONO_ADD_INS (cfg->cbb, thread_ins);
8864 static_data_reg = alloc_ireg (cfg);
8865 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8867 if (cfg->compile_aot) {
8868 int offset_reg, offset2_reg, idx_reg;
8870 /* For TLS variables, this will return the TLS offset */
8871 EMIT_NEW_SFLDACONST (cfg, ins, field);
8872 offset_reg = ins->dreg;
8873 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8874 idx_reg = alloc_ireg (cfg);
8875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8878 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8879 array_reg = alloc_ireg (cfg);
8880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8881 offset2_reg = alloc_ireg (cfg);
8882 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8883 dreg = alloc_ireg (cfg);
8884 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8886 offset = (gsize)addr & 0x7fffffff;
8887 idx = (offset >> 24) - 1;
8889 array_reg = alloc_ireg (cfg);
8890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8891 dreg = alloc_ireg (cfg);
8892 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8894 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8895 (cfg->compile_aot && is_special_static) ||
8896 (context_used && is_special_static)) {
8897 MonoInst *iargs [2];
8899 g_assert (field->parent);
8900 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8902 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8903 field, MONO_RGCTX_INFO_CLASS_FIELD);
8905 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8907 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8908 } else if (context_used) {
8909 MonoInst *static_data;
8912 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8913 method->klass->name_space, method->klass->name, method->name,
8914 depth, field->offset);
8917 if (mono_class_needs_cctor_run (klass, method))
8918 emit_generic_class_init (cfg, klass);
8921 * The pointer we're computing here is
8923 * super_info.static_data + field->offset
8925 static_data = emit_get_rgctx_klass (cfg, context_used,
8926 klass, MONO_RGCTX_INFO_STATIC_DATA);
8928 if (field->offset == 0) {
8931 int addr_reg = mono_alloc_preg (cfg);
8932 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8934 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8935 MonoInst *iargs [2];
8937 g_assert (field->parent);
8938 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8939 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8940 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8942 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8944 CHECK_TYPELOAD (klass);
8946 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
8947 if (!(g_slist_find (class_inits, vtable))) {
8948 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8949 if (cfg->verbose_level > 2)
8950 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8951 class_inits = g_slist_prepend (class_inits, vtable);
8954 if (cfg->run_cctors) {
8956 /* This makes it so that inlining cannot trigger */
8957 /* .cctors: too many apps depend on them */
8958 /* running with a specific order... */
8959 if (! vtable->initialized)
8961 ex = mono_runtime_class_init_full (vtable, FALSE);
8963 set_exception_object (cfg, ex);
8964 goto exception_exit;
8968 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
8970 if (cfg->compile_aot)
8971 EMIT_NEW_SFLDACONST (cfg, ins, field);
8973 EMIT_NEW_PCONST (cfg, ins, addr);
8975 MonoInst *iargs [1];
8976 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8977 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8981 /* Generate IR to do the actual load/store operation */
8983 if (op == CEE_LDSFLDA) {
8984 ins->klass = mono_class_from_mono_type (ftype);
8985 ins->type = STACK_PTR;
8987 } else if (op == CEE_STSFLD) {
8990 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
8991 store->flags |= ins_flag;
8993 gboolean is_const = FALSE;
8994 MonoVTable *vtable = NULL;
8995 gpointer addr = NULL;
8997 if (!context_used) {
8998 vtable = mono_class_vtable (cfg->domain, klass);
8999 CHECK_TYPELOAD (klass);
9001 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9002 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9003 int ro_type = ftype->type;
9005 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9006 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
9007 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
9009 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9012 case MONO_TYPE_BOOLEAN:
9014 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9018 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9021 case MONO_TYPE_CHAR:
9023 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9027 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9032 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9036 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9042 case MONO_TYPE_FNPTR:
9043 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9044 type_to_eval_stack_type ((cfg), field->type, *sp);
9047 case MONO_TYPE_STRING:
9048 case MONO_TYPE_OBJECT:
9049 case MONO_TYPE_CLASS:
9050 case MONO_TYPE_SZARRAY:
9051 case MONO_TYPE_ARRAY:
9052 if (!mono_gc_is_moving ()) {
9053 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9054 type_to_eval_stack_type ((cfg), field->type, *sp);
9062 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9067 case MONO_TYPE_VALUETYPE:
9077 CHECK_STACK_OVF (1);
9079 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9080 load->flags |= ins_flag;
9093 token = read32 (ip + 1);
9094 klass = mini_get_class (method, token, generic_context);
9095 CHECK_TYPELOAD (klass);
9096 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9097 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9098 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9099 generic_class_is_reference_type (cfg, klass)) {
9100 /* insert call to write barrier */
9101 emit_write_barrier (cfg, sp [0], sp [1], -1);
9113 const char *data_ptr;
9115 guint32 field_token;
9121 token = read32 (ip + 1);
9123 klass = mini_get_class (method, token, generic_context);
9124 CHECK_TYPELOAD (klass);
9126 if (cfg->generic_sharing_context)
9127 context_used = mono_class_check_context_used (klass);
9129 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9130 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9131 ins->sreg1 = sp [0]->dreg;
9132 ins->type = STACK_I4;
9133 ins->dreg = alloc_ireg (cfg);
9134 MONO_ADD_INS (cfg->cbb, ins);
9135 *sp = mono_decompose_opcode (cfg, ins);
9140 MonoClass *array_class = mono_array_class_get (klass, 1);
9141 /* FIXME: we cannot get a managed
9142 allocator because we can't get the
9143 open generic class's vtable. We
9144 have the same problem in
9145 handle_alloc(). This
9146 needs to be solved so that we can
9147 have managed allocs of shared
9150 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9151 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9153 MonoMethod *managed_alloc = NULL;
9155 /* FIXME: Decompose later to help abcrem */
9158 args [0] = emit_get_rgctx_klass (cfg, context_used,
9159 array_class, MONO_RGCTX_INFO_VTABLE);
9164 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9166 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9168 if (cfg->opt & MONO_OPT_SHARED) {
9169 /* Decompose now to avoid problems with references to the domainvar */
9170 MonoInst *iargs [3];
9172 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9173 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9176 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9178 /* Decompose later since it is needed by abcrem */
9179 MonoClass *array_type = mono_array_class_get (klass, 1);
9180 mono_class_vtable (cfg->domain, array_type);
9181 CHECK_TYPELOAD (array_type);
9183 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9184 ins->dreg = alloc_ireg_ref (cfg);
9185 ins->sreg1 = sp [0]->dreg;
9186 ins->inst_newa_class = klass;
9187 ins->type = STACK_OBJ;
9189 MONO_ADD_INS (cfg->cbb, ins);
9190 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9191 cfg->cbb->has_array_access = TRUE;
9193 /* Needed so mono_emit_load_get_addr () gets called */
9194 mono_get_got_var (cfg);
9204 * we inline/optimize the initialization sequence if possible.
9205 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9206 * for small sizes open code the memcpy
9207 * ensure the rva field is big enough
9209 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9210 MonoMethod *memcpy_method = get_memcpy_method ();
9211 MonoInst *iargs [3];
9212 int add_reg = alloc_ireg_mp (cfg);
9214 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9215 if (cfg->compile_aot) {
9216 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9218 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9220 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9221 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9230 if (sp [0]->type != STACK_OBJ)
9233 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9234 ins->dreg = alloc_preg (cfg);
9235 ins->sreg1 = sp [0]->dreg;
9236 ins->type = STACK_I4;
9237 /* This flag will be inherited by the decomposition */
9238 ins->flags |= MONO_INST_FAULT;
9239 MONO_ADD_INS (cfg->cbb, ins);
9240 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9241 cfg->cbb->has_array_access = TRUE;
9249 if (sp [0]->type != STACK_OBJ)
9252 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9254 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9255 CHECK_TYPELOAD (klass);
9256 /* we need to make sure that this array is exactly the type it needs
9257 * to be for correctness. the wrappers are lax with their usage
9258 * so we need to ignore them here
9260 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9261 MonoClass *array_class = mono_array_class_get (klass, 1);
9262 mini_emit_check_array_type (cfg, sp [0], array_class);
9263 CHECK_TYPELOAD (array_class);
9267 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9282 case CEE_LDELEM_REF: {
9288 if (*ip == CEE_LDELEM) {
9290 token = read32 (ip + 1);
9291 klass = mini_get_class (method, token, generic_context);
9292 CHECK_TYPELOAD (klass);
9293 mono_class_init (klass);
9296 klass = array_access_to_klass (*ip);
9298 if (sp [0]->type != STACK_OBJ)
9301 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9303 if (sp [1]->opcode == OP_ICONST) {
9304 int array_reg = sp [0]->dreg;
9305 int index_reg = sp [1]->dreg;
9306 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9308 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9309 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9311 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9312 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9315 if (*ip == CEE_LDELEM)
9328 case CEE_STELEM_REF:
9335 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9337 if (*ip == CEE_STELEM) {
9339 token = read32 (ip + 1);
9340 klass = mini_get_class (method, token, generic_context);
9341 CHECK_TYPELOAD (klass);
9342 mono_class_init (klass);
9345 klass = array_access_to_klass (*ip);
9347 if (sp [0]->type != STACK_OBJ)
9350 /* storing a NULL doesn't need any of the complex checks in stelemref */
9351 if (generic_class_is_reference_type (cfg, klass) &&
9352 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9353 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9354 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9355 MonoInst *iargs [3];
9358 mono_class_setup_vtable (obj_array);
9359 g_assert (helper->slot);
9361 if (sp [0]->type != STACK_OBJ)
9363 if (sp [2]->type != STACK_OBJ)
9370 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9372 if (sp [1]->opcode == OP_ICONST) {
9373 int array_reg = sp [0]->dreg;
9374 int index_reg = sp [1]->dreg;
9375 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9377 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9378 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9380 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9381 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9385 if (*ip == CEE_STELEM)
9392 case CEE_CKFINITE: {
9396 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9397 ins->sreg1 = sp [0]->dreg;
9398 ins->dreg = alloc_freg (cfg);
9399 ins->type = STACK_R8;
9400 MONO_ADD_INS (bblock, ins);
9402 *sp++ = mono_decompose_opcode (cfg, ins);
9407 case CEE_REFANYVAL: {
9408 MonoInst *src_var, *src;
9410 int klass_reg = alloc_preg (cfg);
9411 int dreg = alloc_preg (cfg);
9414 MONO_INST_NEW (cfg, ins, *ip);
9417 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9418 CHECK_TYPELOAD (klass);
9419 mono_class_init (klass);
9421 if (cfg->generic_sharing_context)
9422 context_used = mono_class_check_context_used (klass);
9425 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9427 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9428 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9432 MonoInst *klass_ins;
9434 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9435 klass, MONO_RGCTX_INFO_KLASS);
9438 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9439 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9441 mini_emit_class_check (cfg, klass_reg, klass);
9443 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9444 ins->type = STACK_MP;
9449 case CEE_MKREFANY: {
9450 MonoInst *loc, *addr;
9453 MONO_INST_NEW (cfg, ins, *ip);
9456 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9457 CHECK_TYPELOAD (klass);
9458 mono_class_init (klass);
9460 if (cfg->generic_sharing_context)
9461 context_used = mono_class_check_context_used (klass);
9463 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9464 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9467 MonoInst *const_ins;
9468 int type_reg = alloc_preg (cfg);
9470 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9471 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9473 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9474 } else if (cfg->compile_aot) {
9475 int const_reg = alloc_preg (cfg);
9476 int type_reg = alloc_preg (cfg);
9478 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9479 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9481 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9483 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9484 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9486 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9488 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9489 ins->type = STACK_VTYPE;
9490 ins->klass = mono_defaults.typed_reference_class;
9497 MonoClass *handle_class;
9499 CHECK_STACK_OVF (1);
9502 n = read32 (ip + 1);
9504 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9505 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9506 handle = mono_method_get_wrapper_data (method, n);
9507 handle_class = mono_method_get_wrapper_data (method, n + 1);
9508 if (handle_class == mono_defaults.typehandle_class)
9509 handle = &((MonoClass*)handle)->byval_arg;
9512 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9516 mono_class_init (handle_class);
9517 if (cfg->generic_sharing_context) {
9518 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9519 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9520 /* This case handles ldtoken
9521 of an open type, like for
9524 } else if (handle_class == mono_defaults.typehandle_class) {
9525 /* If we get a MONO_TYPE_CLASS
9526 then we need to provide the
9528 instantiation of it. */
9529 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9532 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9533 } else if (handle_class == mono_defaults.fieldhandle_class)
9534 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9535 else if (handle_class == mono_defaults.methodhandle_class)
9536 context_used = mono_method_check_context_used (handle);
9538 g_assert_not_reached ();
9541 if ((cfg->opt & MONO_OPT_SHARED) &&
9542 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9543 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9544 MonoInst *addr, *vtvar, *iargs [3];
9545 int method_context_used;
9547 if (cfg->generic_sharing_context)
9548 method_context_used = mono_method_check_context_used (method);
9550 method_context_used = 0;
9552 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9554 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9555 EMIT_NEW_ICONST (cfg, iargs [1], n);
9556 if (method_context_used) {
9557 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9558 method, MONO_RGCTX_INFO_METHOD);
9559 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9561 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9562 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9564 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9566 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9568 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9570 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9571 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9572 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9573 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9574 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9575 MonoClass *tclass = mono_class_from_mono_type (handle);
9577 mono_class_init (tclass);
9579 ins = emit_get_rgctx_klass (cfg, context_used,
9580 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9581 } else if (cfg->compile_aot) {
9582 if (method->wrapper_type) {
9583 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9584 /* Special case for static synchronized wrappers */
9585 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9587 /* FIXME: n is not a normal token */
9588 cfg->disable_aot = TRUE;
9589 EMIT_NEW_PCONST (cfg, ins, NULL);
9592 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9595 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9597 ins->type = STACK_OBJ;
9598 ins->klass = cmethod->klass;
9601 MonoInst *addr, *vtvar;
9603 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9606 if (handle_class == mono_defaults.typehandle_class) {
9607 ins = emit_get_rgctx_klass (cfg, context_used,
9608 mono_class_from_mono_type (handle),
9609 MONO_RGCTX_INFO_TYPE);
9610 } else if (handle_class == mono_defaults.methodhandle_class) {
9611 ins = emit_get_rgctx_method (cfg, context_used,
9612 handle, MONO_RGCTX_INFO_METHOD);
9613 } else if (handle_class == mono_defaults.fieldhandle_class) {
9614 ins = emit_get_rgctx_field (cfg, context_used,
9615 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9617 g_assert_not_reached ();
9619 } else if (cfg->compile_aot) {
9620 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9622 EMIT_NEW_PCONST (cfg, ins, handle);
9624 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9625 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9626 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9636 MONO_INST_NEW (cfg, ins, OP_THROW);
9638 ins->sreg1 = sp [0]->dreg;
9640 bblock->out_of_line = TRUE;
9641 MONO_ADD_INS (bblock, ins);
9642 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9643 MONO_ADD_INS (bblock, ins);
9646 link_bblock (cfg, bblock, end_bblock);
9647 start_new_bblock = 1;
9649 case CEE_ENDFINALLY:
9650 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9651 MONO_ADD_INS (bblock, ins);
9653 start_new_bblock = 1;
9656 * Control will leave the method so empty the stack, otherwise
9657 * the next basic block will start with a nonempty stack.
9659 while (sp != stack_start) {
9667 if (*ip == CEE_LEAVE) {
9669 target = ip + 5 + (gint32)read32(ip + 1);
9672 target = ip + 2 + (signed char)(ip [1]);
9675 /* empty the stack */
9676 while (sp != stack_start) {
9681 * If this leave statement is in a catch block, check for a
9682 * pending exception, and rethrow it if necessary.
9683 * We avoid doing this in runtime invoke wrappers, since those are called
9684 * by native code which expects the wrapper to catch all exceptions.
9686 for (i = 0; i < header->num_clauses; ++i) {
9687 MonoExceptionClause *clause = &header->clauses [i];
9690 * Use <= in the final comparison to handle clauses with multiple
9691 * leave statements, like in bug #78024.
9692 * The ordering of the exception clauses guarantees that we find the
9695 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9697 MonoBasicBlock *dont_throw;
9702 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9705 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9707 NEW_BBLOCK (cfg, dont_throw);
9710 * Currently, we always rethrow the abort exception, despite the
9711 * fact that this is not correct. See thread6.cs for an example.
9712 * But propagating the abort exception is more important than
9713 * getting the semantics right.
9715 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9716 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9717 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9719 MONO_START_BB (cfg, dont_throw);
9724 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9726 MonoExceptionClause *clause;
9728 for (tmp = handlers; tmp; tmp = tmp->next) {
9730 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9732 link_bblock (cfg, bblock, tblock);
9733 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9734 ins->inst_target_bb = tblock;
9735 ins->inst_eh_block = clause;
9736 MONO_ADD_INS (bblock, ins);
9737 bblock->has_call_handler = 1;
9738 if (COMPILE_LLVM (cfg)) {
9739 MonoBasicBlock *target_bb;
9742 * Link the finally bblock with the target, since it will
9743 * conceptually branch there.
9744 * FIXME: Have to link the bblock containing the endfinally.
9746 GET_BBLOCK (cfg, target_bb, target);
9747 link_bblock (cfg, tblock, target_bb);
9750 g_list_free (handlers);
9753 MONO_INST_NEW (cfg, ins, OP_BR);
9754 MONO_ADD_INS (bblock, ins);
9755 GET_BBLOCK (cfg, tblock, target);
9756 link_bblock (cfg, bblock, tblock);
9757 ins->inst_target_bb = tblock;
9758 start_new_bblock = 1;
9760 if (*ip == CEE_LEAVE)
9769 * Mono specific opcodes
9771 case MONO_CUSTOM_PREFIX: {
9773 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9777 case CEE_MONO_ICALL: {
9779 MonoJitICallInfo *info;
9781 token = read32 (ip + 2);
9782 func = mono_method_get_wrapper_data (method, token);
9783 info = mono_find_jit_icall_by_addr (func);
9786 CHECK_STACK (info->sig->param_count);
9787 sp -= info->sig->param_count;
9789 ins = mono_emit_jit_icall (cfg, info->func, sp);
9790 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9794 inline_costs += 10 * num_calls++;
9798 case CEE_MONO_LDPTR: {
9801 CHECK_STACK_OVF (1);
9803 token = read32 (ip + 2);
9805 ptr = mono_method_get_wrapper_data (method, token);
9806 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9807 MonoJitICallInfo *callinfo;
9808 const char *icall_name;
9810 icall_name = method->name + strlen ("__icall_wrapper_");
9811 g_assert (icall_name);
9812 callinfo = mono_find_jit_icall_by_name (icall_name);
9813 g_assert (callinfo);
9815 if (ptr == callinfo->func) {
9816 /* Will be transformed into an AOTCONST later */
9817 EMIT_NEW_PCONST (cfg, ins, ptr);
9823 /* FIXME: Generalize this */
9824 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9825 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9830 EMIT_NEW_PCONST (cfg, ins, ptr);
9833 inline_costs += 10 * num_calls++;
9834 /* Can't embed random pointers into AOT code */
9835 cfg->disable_aot = 1;
9838 case CEE_MONO_ICALL_ADDR: {
9839 MonoMethod *cmethod;
9842 CHECK_STACK_OVF (1);
9844 token = read32 (ip + 2);
9846 cmethod = mono_method_get_wrapper_data (method, token);
9848 if (cfg->compile_aot) {
9849 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9851 ptr = mono_lookup_internal_call (cmethod);
9853 EMIT_NEW_PCONST (cfg, ins, ptr);
9859 case CEE_MONO_VTADDR: {
9860 MonoInst *src_var, *src;
9866 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9867 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9872 case CEE_MONO_NEWOBJ: {
9873 MonoInst *iargs [2];
9875 CHECK_STACK_OVF (1);
9877 token = read32 (ip + 2);
9878 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9879 mono_class_init (klass);
9880 NEW_DOMAINCONST (cfg, iargs [0]);
9881 MONO_ADD_INS (cfg->cbb, iargs [0]);
9882 NEW_CLASSCONST (cfg, iargs [1], klass);
9883 MONO_ADD_INS (cfg->cbb, iargs [1]);
9884 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9886 inline_costs += 10 * num_calls++;
9889 case CEE_MONO_OBJADDR:
9892 MONO_INST_NEW (cfg, ins, OP_MOVE);
9893 ins->dreg = alloc_ireg_mp (cfg);
9894 ins->sreg1 = sp [0]->dreg;
9895 ins->type = STACK_MP;
9896 MONO_ADD_INS (cfg->cbb, ins);
9900 case CEE_MONO_LDNATIVEOBJ:
9902 * Similar to LDOBJ, but instead load the unmanaged
9903 * representation of the vtype to the stack.
9908 token = read32 (ip + 2);
9909 klass = mono_method_get_wrapper_data (method, token);
9910 g_assert (klass->valuetype);
9911 mono_class_init (klass);
9914 MonoInst *src, *dest, *temp;
9917 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9918 temp->backend.is_pinvoke = 1;
9919 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9920 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9922 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9923 dest->type = STACK_VTYPE;
9924 dest->klass = klass;
9930 case CEE_MONO_RETOBJ: {
9932 * Same as RET, but return the native representation of a vtype
9935 g_assert (cfg->ret);
9936 g_assert (mono_method_signature (method)->pinvoke);
9941 token = read32 (ip + 2);
9942 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9944 if (!cfg->vret_addr) {
9945 g_assert (cfg->ret_var_is_local);
9947 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9949 EMIT_NEW_RETLOADA (cfg, ins);
9951 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9953 if (sp != stack_start)
9956 MONO_INST_NEW (cfg, ins, OP_BR);
9957 ins->inst_target_bb = end_bblock;
9958 MONO_ADD_INS (bblock, ins);
9959 link_bblock (cfg, bblock, end_bblock);
9960 start_new_bblock = 1;
9964 case CEE_MONO_CISINST:
9965 case CEE_MONO_CCASTCLASS: {
9970 token = read32 (ip + 2);
9971 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9972 if (ip [1] == CEE_MONO_CISINST)
9973 ins = handle_cisinst (cfg, klass, sp [0]);
9975 ins = handle_ccastclass (cfg, klass, sp [0]);
9981 case CEE_MONO_SAVE_LMF:
9982 case CEE_MONO_RESTORE_LMF:
9983 #ifdef MONO_ARCH_HAVE_LMF_OPS
9984 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9985 MONO_ADD_INS (bblock, ins);
9986 cfg->need_lmf_area = TRUE;
9990 case CEE_MONO_CLASSCONST:
9991 CHECK_STACK_OVF (1);
9993 token = read32 (ip + 2);
9994 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9997 inline_costs += 10 * num_calls++;
9999 case CEE_MONO_NOT_TAKEN:
10000 bblock->out_of_line = TRUE;
10004 CHECK_STACK_OVF (1);
10006 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
10007 ins->dreg = alloc_preg (cfg);
10008 ins->inst_offset = (gint32)read32 (ip + 2);
10009 ins->type = STACK_PTR;
10010 MONO_ADD_INS (bblock, ins);
10014 case CEE_MONO_DYN_CALL: {
10015 MonoCallInst *call;
10017 /* It would be easier to call a trampoline, but that would put an
10018 * extra frame on the stack, confusing exception handling. So
10019 * implement it inline using an opcode for now.
10022 if (!cfg->dyn_call_var) {
10023 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10024 /* prevent it from being register allocated */
10025 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10028 /* Has to use a call inst since the local regalloc expects it */
10029 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10030 ins = (MonoInst*)call;
10032 ins->sreg1 = sp [0]->dreg;
10033 ins->sreg2 = sp [1]->dreg;
10034 MONO_ADD_INS (bblock, ins);
10036 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10037 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10041 inline_costs += 10 * num_calls++;
10045 case CEE_MONO_MEMORY_BARRIER: {
10047 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10052 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10058 case CEE_PREFIX1: {
10061 case CEE_ARGLIST: {
10062 /* somewhat similar to LDTOKEN */
10063 MonoInst *addr, *vtvar;
10064 CHECK_STACK_OVF (1);
10065 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10067 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10068 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10070 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10071 ins->type = STACK_VTYPE;
10072 ins->klass = mono_defaults.argumenthandle_class;
10085 * The following transforms:
10086 * CEE_CEQ into OP_CEQ
10087 * CEE_CGT into OP_CGT
10088 * CEE_CGT_UN into OP_CGT_UN
10089 * CEE_CLT into OP_CLT
10090 * CEE_CLT_UN into OP_CLT_UN
10092 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10094 MONO_INST_NEW (cfg, ins, cmp->opcode);
10096 cmp->sreg1 = sp [0]->dreg;
10097 cmp->sreg2 = sp [1]->dreg;
10098 type_from_op (cmp, sp [0], sp [1]);
10100 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10101 cmp->opcode = OP_LCOMPARE;
10102 else if (sp [0]->type == STACK_R8)
10103 cmp->opcode = OP_FCOMPARE;
10105 cmp->opcode = OP_ICOMPARE;
10106 MONO_ADD_INS (bblock, cmp);
10107 ins->type = STACK_I4;
10108 ins->dreg = alloc_dreg (cfg, ins->type);
10109 type_from_op (ins, sp [0], sp [1]);
10111 if (cmp->opcode == OP_FCOMPARE) {
10113 * The backends expect the fceq opcodes to do the
10116 cmp->opcode = OP_NOP;
10117 ins->sreg1 = cmp->sreg1;
10118 ins->sreg2 = cmp->sreg2;
10120 MONO_ADD_INS (bblock, ins);
10126 MonoInst *argconst;
10127 MonoMethod *cil_method;
10129 CHECK_STACK_OVF (1);
10131 n = read32 (ip + 2);
10132 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10133 if (!cmethod || mono_loader_get_last_error ())
10135 mono_class_init (cmethod->klass);
10137 mono_save_token_info (cfg, image, n, cmethod);
10139 if (cfg->generic_sharing_context)
10140 context_used = mono_method_check_context_used (cmethod);
10142 cil_method = cmethod;
10143 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10144 METHOD_ACCESS_FAILURE;
10146 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10147 if (check_linkdemand (cfg, method, cmethod))
10149 CHECK_CFG_EXCEPTION;
10150 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10151 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10155 * Optimize the common case of ldftn+delegate creation
10157 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10158 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10159 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10160 MonoInst *target_ins;
10161 MonoMethod *invoke;
10162 int invoke_context_used = 0;
10164 invoke = mono_get_delegate_invoke (ctor_method->klass);
10165 if (!invoke || !mono_method_signature (invoke))
10168 if (cfg->generic_sharing_context)
10169 invoke_context_used = mono_method_check_context_used (invoke);
10171 target_ins = sp [-1];
10173 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10174 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10176 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10177 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10178 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10179 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10180 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10184 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10185 /* FIXME: SGEN support */
10186 if (invoke_context_used == 0) {
10188 if (cfg->verbose_level > 3)
10189 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10191 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10192 CHECK_CFG_EXCEPTION;
10201 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10202 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10206 inline_costs += 10 * num_calls++;
10209 case CEE_LDVIRTFTN: {
10210 MonoInst *args [2];
10214 n = read32 (ip + 2);
10215 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10216 if (!cmethod || mono_loader_get_last_error ())
10218 mono_class_init (cmethod->klass);
10220 if (cfg->generic_sharing_context)
10221 context_used = mono_method_check_context_used (cmethod);
10223 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10224 if (check_linkdemand (cfg, method, cmethod))
10226 CHECK_CFG_EXCEPTION;
10227 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10228 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10234 args [1] = emit_get_rgctx_method (cfg, context_used,
10235 cmethod, MONO_RGCTX_INFO_METHOD);
10238 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10240 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10243 inline_costs += 10 * num_calls++;
10247 CHECK_STACK_OVF (1);
10249 n = read16 (ip + 2);
10251 EMIT_NEW_ARGLOAD (cfg, ins, n);
10256 CHECK_STACK_OVF (1);
10258 n = read16 (ip + 2);
10260 NEW_ARGLOADA (cfg, ins, n);
10261 MONO_ADD_INS (cfg->cbb, ins);
10269 n = read16 (ip + 2);
10271 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10273 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10277 CHECK_STACK_OVF (1);
10279 n = read16 (ip + 2);
10281 EMIT_NEW_LOCLOAD (cfg, ins, n);
10286 unsigned char *tmp_ip;
10287 CHECK_STACK_OVF (1);
10289 n = read16 (ip + 2);
10292 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10298 EMIT_NEW_LOCLOADA (cfg, ins, n);
10307 n = read16 (ip + 2);
10309 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10311 emit_stloc_ir (cfg, sp, header, n);
10318 if (sp != stack_start)
10320 if (cfg->method != method)
10322 * Inlining this into a loop in a parent could lead to
10323 * stack overflows which is different behavior than the
10324 * non-inlined case, thus disable inlining in this case.
10326 goto inline_failure;
10328 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10329 ins->dreg = alloc_preg (cfg);
10330 ins->sreg1 = sp [0]->dreg;
10331 ins->type = STACK_PTR;
10332 MONO_ADD_INS (cfg->cbb, ins);
10334 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10336 ins->flags |= MONO_INST_INIT;
10341 case CEE_ENDFILTER: {
10342 MonoExceptionClause *clause, *nearest;
10343 int cc, nearest_num;
10347 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10349 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10350 ins->sreg1 = (*sp)->dreg;
10351 MONO_ADD_INS (bblock, ins);
10352 start_new_bblock = 1;
10357 for (cc = 0; cc < header->num_clauses; ++cc) {
10358 clause = &header->clauses [cc];
10359 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10360 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10361 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10366 g_assert (nearest);
10367 if ((ip - header->code) != nearest->handler_offset)
10372 case CEE_UNALIGNED_:
10373 ins_flag |= MONO_INST_UNALIGNED;
10374 /* FIXME: record alignment? we can assume 1 for now */
10378 case CEE_VOLATILE_:
10379 ins_flag |= MONO_INST_VOLATILE;
10383 ins_flag |= MONO_INST_TAILCALL;
10384 cfg->flags |= MONO_CFG_HAS_TAIL;
10385 /* Can't inline tail calls at this time */
10386 inline_costs += 100000;
10393 token = read32 (ip + 2);
10394 klass = mini_get_class (method, token, generic_context);
10395 CHECK_TYPELOAD (klass);
10396 if (generic_class_is_reference_type (cfg, klass))
10397 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10399 mini_emit_initobj (cfg, *sp, NULL, klass);
10403 case CEE_CONSTRAINED_:
10405 token = read32 (ip + 2);
10406 if (method->wrapper_type != MONO_WRAPPER_NONE)
10407 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10409 constrained_call = mono_class_get_full (image, token, generic_context);
10410 CHECK_TYPELOAD (constrained_call);
10414 case CEE_INITBLK: {
10415 MonoInst *iargs [3];
10419 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10420 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10421 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10422 /* emit_memset only works when val == 0 */
10423 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10425 iargs [0] = sp [0];
10426 iargs [1] = sp [1];
10427 iargs [2] = sp [2];
10428 if (ip [1] == CEE_CPBLK) {
10429 MonoMethod *memcpy_method = get_memcpy_method ();
10430 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10432 MonoMethod *memset_method = get_memset_method ();
10433 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10443 ins_flag |= MONO_INST_NOTYPECHECK;
10445 ins_flag |= MONO_INST_NORANGECHECK;
10446 /* we ignore the no-nullcheck for now since we
10447 * really do it explicitly only when doing callvirt->call
10451 case CEE_RETHROW: {
10453 int handler_offset = -1;
10455 for (i = 0; i < header->num_clauses; ++i) {
10456 MonoExceptionClause *clause = &header->clauses [i];
10457 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10458 handler_offset = clause->handler_offset;
10463 bblock->flags |= BB_EXCEPTION_UNSAFE;
10465 g_assert (handler_offset != -1);
10467 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10468 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10469 ins->sreg1 = load->dreg;
10470 MONO_ADD_INS (bblock, ins);
10472 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10473 MONO_ADD_INS (bblock, ins);
10476 link_bblock (cfg, bblock, end_bblock);
10477 start_new_bblock = 1;
10485 CHECK_STACK_OVF (1);
10487 token = read32 (ip + 2);
10488 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10489 MonoType *type = mono_type_create_from_typespec (image, token);
10490 token = mono_type_size (type, &ialign);
10492 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10493 CHECK_TYPELOAD (klass);
10494 mono_class_init (klass);
10495 token = mono_class_value_size (klass, &align);
10497 EMIT_NEW_ICONST (cfg, ins, token);
10502 case CEE_REFANYTYPE: {
10503 MonoInst *src_var, *src;
10509 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10511 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10512 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10513 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10518 case CEE_READONLY_:
10531 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10541 g_warning ("opcode 0x%02x not handled", *ip);
10545 if (start_new_bblock != 1)
10548 bblock->cil_length = ip - bblock->cil_code;
10549 if (bblock->next_bb) {
10550 /* This could already be set because of inlining, #693905 */
10551 MonoBasicBlock *bb = bblock;
10553 while (bb->next_bb)
10555 bb->next_bb = end_bblock;
10557 bblock->next_bb = end_bblock;
10560 if (cfg->method == method && cfg->domainvar) {
10562 MonoInst *get_domain;
10564 cfg->cbb = init_localsbb;
10566 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10567 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10570 get_domain->dreg = alloc_preg (cfg);
10571 MONO_ADD_INS (cfg->cbb, get_domain);
10573 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10574 MONO_ADD_INS (cfg->cbb, store);
10577 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10578 if (cfg->compile_aot)
10579 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10580 mono_get_got_var (cfg);
10583 if (cfg->method == method && cfg->got_var)
10584 mono_emit_load_got_addr (cfg);
10589 cfg->cbb = init_localsbb;
10591 for (i = 0; i < header->num_locals; ++i) {
10592 MonoType *ptype = header->locals [i];
10593 int t = ptype->type;
10594 dreg = cfg->locals [i]->dreg;
10596 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10597 t = mono_class_enum_basetype (ptype->data.klass)->type;
10598 if (ptype->byref) {
10599 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10600 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10601 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10602 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10603 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10604 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10605 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10606 ins->type = STACK_R8;
10607 ins->inst_p0 = (void*)&r8_0;
10608 ins->dreg = alloc_dreg (cfg, STACK_R8);
10609 MONO_ADD_INS (init_localsbb, ins);
10610 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10611 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10612 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10613 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10615 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10620 if (cfg->init_ref_vars && cfg->method == method) {
10621 /* Emit initialization for ref vars */
10622 // FIXME: Avoid duplication initialization for IL locals.
10623 for (i = 0; i < cfg->num_varinfo; ++i) {
10624 MonoInst *ins = cfg->varinfo [i];
10626 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10627 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10631 /* Add a sequence point for method entry/exit events */
10633 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10634 MONO_ADD_INS (init_localsbb, ins);
10635 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10636 MONO_ADD_INS (cfg->bb_exit, ins);
10641 if (cfg->method == method) {
10642 MonoBasicBlock *bb;
10643 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10644 bb->region = mono_find_block_region (cfg, bb->real_offset);
10646 mono_create_spvar_for_region (cfg, bb->region);
10647 if (cfg->verbose_level > 2)
10648 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10652 g_slist_free (class_inits);
10653 dont_inline = g_list_remove (dont_inline, method);
10655 if (inline_costs < 0) {
10658 /* Method is too large */
10659 mname = mono_method_full_name (method, TRUE);
10660 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10661 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10663 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10664 mono_basic_block_free (original_bb);
10668 if ((cfg->verbose_level > 2) && (cfg->method == method))
10669 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10671 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10672 mono_basic_block_free (original_bb);
10673 return inline_costs;
10676 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10683 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10687 set_exception_type_from_invalid_il (cfg, method, ip);
10691 g_slist_free (class_inits);
10692 mono_basic_block_free (original_bb);
10693 dont_inline = g_list_remove (dont_inline, method);
10694 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store-to-membase opcode to its immediate-source
 * counterpart (e.g. OP_STOREI4_MEMBASE_REG -> OP_STOREI4_MEMBASE_IMM).
 * Used by mono_spill_global_vars () to fuse a constant definition directly
 * into the spill store. Aborts via g_assert_not_reached () for any opcode
 * without an immediate form.
 * NOTE(review): interior lines (opening brace, switch header, default label)
 * are missing from this extraction -- compare against upstream before editing.
 */
10699 store_membase_reg_to_store_membase_imm (int opcode)
10702 case OP_STORE_MEMBASE_REG:
10703 return OP_STORE_MEMBASE_IMM;
10704 case OP_STOREI1_MEMBASE_REG:
10705 return OP_STOREI1_MEMBASE_IMM;
10706 case OP_STOREI2_MEMBASE_REG:
10707 return OP_STOREI2_MEMBASE_IMM;
10708 case OP_STOREI4_MEMBASE_REG:
10709 return OP_STOREI4_MEMBASE_IMM;
10710 case OP_STOREI8_MEMBASE_REG:
10711 return OP_STOREI8_MEMBASE_IMM;
/* No _IMM counterpart exists for the remaining store opcodes. */
10713 g_assert_not_reached ();
10719 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant whose second
 * operand is an immediate (e.g. int add -> OP_IADD_IMM), including the
 * register-source stores and some x86/amd64-specific opcodes. Presumably
 * returns -1 when no immediate variant exists (the fallthrough return is
 * missing from this extraction -- TODO confirm against upstream).
 * NOTE(review): the case labels were sampled out of this extraction; only
 * the return statements are visible.
 */
10722 mono_op_to_op_imm (int opcode)
10726 return OP_IADD_IMM;
10728 return OP_ISUB_IMM;
10730 return OP_IDIV_IMM;
10732 return OP_IDIV_UN_IMM;
10734 return OP_IREM_IMM;
10736 return OP_IREM_UN_IMM;
10738 return OP_IMUL_IMM;
10740 return OP_IAND_IMM;
10744 return OP_IXOR_IMM;
10746 return OP_ISHL_IMM;
10748 return OP_ISHR_IMM;
10750 return OP_ISHR_UN_IMM;
10753 return OP_LADD_IMM;
10755 return OP_LSUB_IMM;
10757 return OP_LAND_IMM;
10761 return OP_LXOR_IMM;
10763 return OP_LSHL_IMM;
10765 return OP_LSHR_IMM;
10767 return OP_LSHR_UN_IMM;
10770 return OP_COMPARE_IMM;
10772 return OP_ICOMPARE_IMM;
10774 return OP_LCOMPARE_IMM;
10776 case OP_STORE_MEMBASE_REG:
10777 return OP_STORE_MEMBASE_IMM;
10778 case OP_STOREI1_MEMBASE_REG:
10779 return OP_STOREI1_MEMBASE_IMM;
10780 case OP_STOREI2_MEMBASE_REG:
10781 return OP_STOREI2_MEMBASE_IMM;
10782 case OP_STOREI4_MEMBASE_REG:
10783 return OP_STOREI4_MEMBASE_IMM;
10785 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10787 return OP_X86_PUSH_IMM;
10788 case OP_X86_COMPARE_MEMBASE_REG:
10789 return OP_X86_COMPARE_MEMBASE_IMM;
10791 #if defined(TARGET_AMD64)
10792 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10793 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* A call through a register becomes a direct call once the target is known. */
10795 case OP_VOIDCALL_REG:
10796 return OP_VOIDCALL;
10804 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * low-level OP_LOAD*_MEMBASE IR opcode. Both CEE_LDIND_I and CEE_LDIND_REF
 * map to the pointer-sized OP_LOAD_MEMBASE. Aborts on anything else.
 * NOTE(review): most case labels are missing from this extraction; only
 * the returns (and two surviving labels) are visible.
 */
10811 ldind_to_load_membase (int opcode)
10815 return OP_LOADI1_MEMBASE;
10817 return OP_LOADU1_MEMBASE;
10819 return OP_LOADI2_MEMBASE;
10821 return OP_LOADU2_MEMBASE;
10823 return OP_LOADI4_MEMBASE;
10825 return OP_LOADU4_MEMBASE;
10827 return OP_LOAD_MEMBASE;
10828 case CEE_LDIND_REF:
10829 return OP_LOAD_MEMBASE;
10831 return OP_LOADI8_MEMBASE;
10833 return OP_LOADR4_MEMBASE;
10835 return OP_LOADR8_MEMBASE;
10837 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode. CEE_STIND_REF uses the pointer-sized
 * OP_STORE_MEMBASE_REG. Aborts on anything else.
 * NOTE(review): most case labels were sampled out of this extraction.
 */
10844 stind_to_store_membase (int opcode)
10848 return OP_STOREI1_MEMBASE_REG;
10850 return OP_STOREI2_MEMBASE_REG;
10852 return OP_STOREI4_MEMBASE_REG;
10854 case CEE_STIND_REF:
10855 return OP_STORE_MEMBASE_REG;
10857 return OP_STOREI8_MEMBASE_REG;
10859 return OP_STORER4_MEMBASE_REG;
10861 return OP_STORER8_MEMBASE_REG;
10863 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load opcode to its absolute-address (_MEM) variant,
 * which only x86/amd64 support; the 8-byte form is gated on a 64-bit
 * register size. Presumably returns -1 elsewhere / for unsupported opcodes
 * (the fallthrough is missing from this extraction -- TODO confirm).
 */
10870 mono_load_membase_to_load_mem (int opcode)
10872 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10873 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10875 case OP_LOAD_MEMBASE:
10876 return OP_LOAD_MEM;
10877 case OP_LOADU1_MEMBASE:
10878 return OP_LOADU1_MEM;
10879 case OP_LOADU2_MEMBASE:
10880 return OP_LOADU2_MEM;
10881 case OP_LOADI4_MEMBASE:
10882 return OP_LOADI4_MEM;
10883 case OP_LOADU4_MEMBASE:
10884 return OP_LOADU4_MEM;
10885 #if SIZEOF_REGISTER == 8
10886 case OP_LOADI8_MEMBASE:
10887 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given the store opcode STORE_OPCODE used to spill a variable and an ALU
 * opcode OPCODE whose destination is that variable, return the fused
 * read-modify-write _MEMBASE opcode (e.g. add reg,reg + store -> add to
 * memory), so mono_spill_global_vars () can avoid a separate load+store.
 * Only x86/amd64 have such opcodes; the store opcode must match the
 * operation width (4-byte forms on x86, 4- or 8-byte on amd64).
 * Presumably returns -1 when no fusion is possible (fallthrough missing
 * from this extraction -- TODO confirm). Callers test for != -1.
 */
10896 op_to_op_dest_membase (int store_opcode, int opcode)
10898 #if defined(TARGET_X86)
10899 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10904 return OP_X86_ADD_MEMBASE_REG;
10906 return OP_X86_SUB_MEMBASE_REG;
10908 return OP_X86_AND_MEMBASE_REG;
10910 return OP_X86_OR_MEMBASE_REG;
10912 return OP_X86_XOR_MEMBASE_REG;
10915 return OP_X86_ADD_MEMBASE_IMM;
10918 return OP_X86_SUB_MEMBASE_IMM;
10921 return OP_X86_AND_MEMBASE_IMM;
10924 return OP_X86_OR_MEMBASE_IMM;
10927 return OP_X86_XOR_MEMBASE_IMM;
10933 #if defined(TARGET_AMD64)
10934 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit (OP_X86_*) forms first, then 64-bit (OP_AMD64_*) forms. */
10939 return OP_X86_ADD_MEMBASE_REG;
10941 return OP_X86_SUB_MEMBASE_REG;
10943 return OP_X86_AND_MEMBASE_REG;
10945 return OP_X86_OR_MEMBASE_REG;
10947 return OP_X86_XOR_MEMBASE_REG;
10949 return OP_X86_ADD_MEMBASE_IMM;
10951 return OP_X86_SUB_MEMBASE_IMM;
10953 return OP_X86_AND_MEMBASE_IMM;
10955 return OP_X86_OR_MEMBASE_IMM;
10957 return OP_X86_XOR_MEMBASE_IMM;
10959 return OP_AMD64_ADD_MEMBASE_REG;
10961 return OP_AMD64_SUB_MEMBASE_REG;
10963 return OP_AMD64_AND_MEMBASE_REG;
10965 return OP_AMD64_OR_MEMBASE_REG;
10967 return OP_AMD64_XOR_MEMBASE_REG;
10970 return OP_AMD64_ADD_MEMBASE_IMM;
10973 return OP_AMD64_SUB_MEMBASE_IMM;
10976 return OP_AMD64_AND_MEMBASE_IMM;
10979 return OP_AMD64_OR_MEMBASE_IMM;
10982 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a setcc-style opcode with the byte store spilling its result,
 * producing an x86/amd64 SETcc-to-memory opcode. Only valid for the 1-byte
 * store. Presumably returns -1 otherwise (fallthrough missing from this
 * extraction -- TODO confirm); callers test for != -1.
 */
10992 op_to_op_store_membase (int store_opcode, int opcode)
10994 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10997 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10998 return OP_X86_SETEQ_MEMBASE;
11000 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11001 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse the load of a spilled variable (LOAD_OPCODE) that feeds OPCODE's
 * first source operand into a single reg-from-memory opcode on x86/amd64
 * (push from memory, compare against memory). The load width must match
 * the operation width; on amd64 under ILP32 (x32) an 8-byte load cannot be
 * fused with a pointer-sized op. Presumably returns -1 when no fusion
 * applies (fallthrough missing from this extraction -- TODO confirm).
 */
11009 op_to_op_src1_membase (int load_opcode, int opcode)
11012 /* FIXME: This has sign extension issues */
11014 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11015 return OP_X86_COMPARE_MEMBASE8_IMM;
11018 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11023 return OP_X86_PUSH_MEMBASE;
11024 case OP_COMPARE_IMM:
11025 case OP_ICOMPARE_IMM:
11026 return OP_X86_COMPARE_MEMBASE_IMM;
11029 return OP_X86_COMPARE_MEMBASE_REG;
11033 #ifdef TARGET_AMD64
11034 /* FIXME: This has sign extension issues */
11036 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11037 return OP_X86_COMPARE_MEMBASE8_IMM;
11042 #ifdef __mono_ilp32__
11043 if (load_opcode == OP_LOADI8_MEMBASE)
11045 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11047 return OP_X86_PUSH_MEMBASE;
/* The block below is commented out upstream: the _IMM fusions only work
 * for 32-bit immediates. */
11049 /* FIXME: This only works for 32 bit immediates
11050 case OP_COMPARE_IMM:
11051 case OP_LCOMPARE_IMM:
11052 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11053 return OP_AMD64_COMPARE_MEMBASE_IMM;
11055 case OP_ICOMPARE_IMM:
11056 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11057 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11061 #ifdef __mono_ilp32__
11062 if (load_opcode == OP_LOAD_MEMBASE)
11063 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11064 if (load_opcode == OP_LOADI8_MEMBASE)
11066 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11068 return OP_AMD64_COMPARE_MEMBASE_REG;
11071 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11072 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same as op_to_op_src1_membase () but for OPCODE's second source
 * operand: fuse the load of a spilled variable into a reg-op-memory ALU or
 * compare opcode on x86/amd64. 4-byte loads select the OP_X86_* /
 * OP_AMD64_I* 32-bit forms, 8-byte loads the OP_AMD64_* 64-bit forms; the
 * ILP32 (x32) special cases adjust which widths pointer-sized loads pair
 * with. Presumably returns -1 when no fusion applies (fallthrough missing
 * from this extraction -- TODO confirm).
 */
11081 op_to_op_src2_membase (int load_opcode, int opcode)
11084 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11090 return OP_X86_COMPARE_REG_MEMBASE;
11092 return OP_X86_ADD_REG_MEMBASE;
11094 return OP_X86_SUB_REG_MEMBASE;
11096 return OP_X86_AND_REG_MEMBASE;
11098 return OP_X86_OR_REG_MEMBASE;
11100 return OP_X86_XOR_REG_MEMBASE;
11104 #ifdef TARGET_AMD64
11105 #ifdef __mono_ilp32__
11106 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
11108 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
11112 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11114 return OP_X86_ADD_REG_MEMBASE;
11116 return OP_X86_SUB_REG_MEMBASE;
11118 return OP_X86_AND_REG_MEMBASE;
11120 return OP_X86_OR_REG_MEMBASE;
11122 return OP_X86_XOR_REG_MEMBASE;
11124 #ifdef __mono_ilp32__
11125 } else if (load_opcode == OP_LOADI8_MEMBASE) {
11127 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
11132 return OP_AMD64_COMPARE_REG_MEMBASE;
11134 return OP_AMD64_ADD_REG_MEMBASE;
11136 return OP_AMD64_SUB_REG_MEMBASE;
11138 return OP_AMD64_AND_REG_MEMBASE;
11140 return OP_AMD64_OR_REG_MEMBASE;
11142 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes that are
 * emulated on this target (long shifts on 32-bit targets without native
 * long-shift support, mul/div/rem when MONO_ARCH_EMULATE_* is defined),
 * since the emulation path needs the register form. The excluded case
 * labels are missing from this extraction.
 */
11151 mono_op_to_op_imm_noemul (int opcode)
11154 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11160 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11168 return mono_op_to_op_imm (opcode);
11172 #ifndef DISABLE_JIT
11175 * mono_handle_global_vregs:
11177 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/* ... for them, so the spill pass can give them a home; conversely, demote
 * variables used in only one bblock back to plain local vregs. */
11181 mono_handle_global_vregs (MonoCompile *cfg)
11183 gint32 *vreg_to_bb;
11184 MonoBasicBlock *bb;
/* vreg_to_bb [vreg]: 0 = unseen, block_num+1 = seen in exactly that bblock,
 * -1 = seen in more than one bblock.
 * NOTE(review): element type is gint32 but the allocation uses
 * sizeof (gint32*), and the "+ 1" is added outside the multiplication;
 * this over-allocates on 64-bit (harmless) but looks unintended -- should
 * presumably be sizeof (gint32) * (cfg->next_vreg + 1). Confirm upstream. */
11187 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11189 #ifdef MONO_ARCH_SIMD_INTRINSICS
11190 if (cfg->uses_simd_intrinsics)
11191 mono_simd_simplify_indirection (cfg);
11194 /* Find local vregs used in more than one bb */
11195 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11196 MonoInst *ins = bb->code;
11197 int block_num = bb->block_num;
11199 if (cfg->verbose_level > 2)
11200 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11203 for (; ins; ins = ins->next) {
11204 const char *spec = INS_INFO (ins->opcode);
11205 int regtype = 0, regindex;
11208 if (G_UNLIKELY (cfg->verbose_level > 2))
11209 mono_print_ins (ins);
11211 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (index 0) then sreg1/sreg2/sreg3 (indexes 1-3). */
11213 for (regindex = 0; regindex < 4; regindex ++) {
11216 if (regindex == 0) {
11217 regtype = spec [MONO_INST_DEST];
11218 if (regtype == ' ')
11221 } else if (regindex == 1) {
11222 regtype = spec [MONO_INST_SRC1];
11223 if (regtype == ' ')
11226 } else if (regindex == 2) {
11227 regtype = spec [MONO_INST_SRC2];
11228 if (regtype == ' ')
11231 } else if (regindex == 3) {
11232 regtype = spec [MONO_INST_SRC3];
11233 if (regtype == ' ')
11238 #if SIZEOF_REGISTER == 4
11239 /* In the LLVM case, the long opcodes are not decomposed */
11240 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11242 * Since some instructions reference the original long vreg,
11243 * and some reference the two component vregs, it is quite hard
11244 * to determine when it needs to be global. So be conservative.
11246 if (!get_vreg_to_inst (cfg, vreg)) {
11247 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11249 if (cfg->verbose_level > 2)
11250 printf ("LONG VREG R%d made global.\n", vreg);
11254 * Make the component vregs volatile since the optimizations can
11255 * get confused otherwise.
/* vreg+1 / vreg+2 are the low/high 32-bit halves of the long vreg. */
11257 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11258 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11262 g_assert (vreg != -1);
11264 prev_bb = vreg_to_bb [vreg];
11265 if (prev_bb == 0) {
11266 /* 0 is a valid block num */
11267 vreg_to_bb [vreg] = block_num + 1;
11268 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never made global. */
11269 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11272 if (!get_vreg_to_inst (cfg, vreg)) {
11273 if (G_UNLIKELY (cfg->verbose_level > 2))
11274 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create a variable of the type implied by the regtype spec char. */
11278 if (vreg_is_ref (cfg, vreg))
11279 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11281 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11284 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11287 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11290 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11293 g_assert_not_reached ();
11297 /* Flag as having been used in more than one bb */
11298 vreg_to_bb [vreg] = -1;
11304 /* If a variable is used in only one bblock, convert it into a local vreg */
11305 for (i = 0; i < cfg->num_varinfo; i++) {
11306 MonoInst *var = cfg->varinfo [i];
11307 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11309 switch (var->type) {
11315 #if SIZEOF_REGISTER == 8
11318 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11319 /* Enabling this screws up the fp stack on x86 */
11322 /* Arguments are implicitly global */
11323 /* Putting R4 vars into registers doesn't work currently */
11324 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11326 * Make sure that the variable's liveness interval doesn't contain a call, since
11327 * that would cause the lvreg to be spilled, making the whole optimization
11330 /* This is too slow for JIT compilation */
11332 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11334 int def_index, call_index, ins_index;
11335 gboolean spilled = FALSE;
11340 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11341 const char *spec = INS_INFO (ins->opcode);
11343 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11344 def_index = ins_index;
/* NOTE(review): both halves of this || test SRC1/sreg1 -- the second
 * clause was presumably meant to test SRC2/sreg2; as written the
 * duplicate is a no-op copy-paste bug. Confirm against upstream. */
11346 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11347 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11348 if (call_index > def_index) {
11354 if (MONO_IS_CALL (ins))
11355 call_index = ins_index;
11365 if (G_UNLIKELY (cfg->verbose_level > 2))
11366 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop its vreg mapping. */
11367 var->flags |= MONO_INST_IS_DEAD;
11368 cfg->vreg_to_inst [var->dreg] = NULL;
11375 * Compress the varinfo and vars tables so the liveness computation is faster and
11376 * takes up less space.
11379 for (i = 0; i < cfg->num_varinfo; ++i) {
11380 MonoInst *var = cfg->varinfo [i];
11381 if (pos < i && cfg->locals_start == i)
11382 cfg->locals_start = pos;
11383 if (!(var->flags & MONO_INST_IS_DEAD)) {
11385 cfg->varinfo [pos] = cfg->varinfo [i];
11386 cfg->varinfo [pos]->inst_c0 = pos;
11387 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11388 cfg->vars [pos].idx = pos;
11389 #if SIZEOF_REGISTER == 4
11390 if (cfg->varinfo [pos]->type == STACK_I8) {
11391 /* Modify the two component vars too */
11394 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11395 var1->inst_c0 = pos;
11396 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11397 var1->inst_c0 = pos;
11404 cfg->num_varinfo = pos;
11405 if (cfg->locals_start > cfg->num_varinfo)
11406 cfg->locals_start = cfg->num_varinfo;
11410 * mono_spill_global_vars:
11412 * Generate spill code for variables which are not allocated to registers,
11413 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11414 * code is generated which could be optimized by the local optimization passes.
11417 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11419 MonoBasicBlock *bb;
11421 int orig_next_vreg;
11422 guint32 *vreg_to_lvreg;
11424 guint32 i, lvregs_len;
11425 gboolean dest_has_lvreg = FALSE;
/* Indexed by the regtype spec char ('i'/'l'/'f'/'x'). */
11426 guint32 stacktypes [128];
11427 MonoInst **live_range_start, **live_range_end;
11428 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11430 *need_local_opts = FALSE;
11432 memset (spec2, 0, sizeof (spec2));
11434 /* FIXME: Move this function to mini.c */
11435 stacktypes ['i'] = STACK_PTR;
11436 stacktypes ['l'] = STACK_I8;
11437 stacktypes ['f'] = STACK_R8;
11438 #ifdef MONO_ARCH_SIMD_INTRINSICS
11439 stacktypes ['x'] = STACK_VTYPE;
11442 #if SIZEOF_REGISTER == 4
11443 /* Create MonoInsts for longs */
11444 for (i = 0; i < cfg->num_varinfo; i++) {
11445 MonoInst *ins = cfg->varinfo [i];
11447 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11448 switch (ins->type) {
11453 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11456 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two 32-bit component vregs (dreg+1 = low word,
 * dreg+2 = high word) stack slots inside the long's slot. */
11458 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11460 tree->opcode = OP_REGOFFSET;
11461 tree->inst_basereg = ins->inst_basereg;
11462 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11464 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11466 tree->opcode = OP_REGOFFSET;
11467 tree->inst_basereg = ins->inst_basereg;
11468 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11478 if (cfg->compute_gc_maps) {
11479 /* registers need liveness info even for non-refs */
11480 for (i = 0; i < cfg->num_varinfo; i++) {
11481 MonoInst *ins = cfg->varinfo [i];
11483 if (ins->opcode == OP_REGVAR)
11484 ins->flags |= MONO_INST_GC_TRACK;
11488 /* FIXME: widening and truncation */
11491 * As an optimization, when a variable allocated to the stack is first loaded into
11492 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11493 * the variable again.
11495 orig_next_vreg = cfg->next_vreg;
11496 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-size scratch list of vregs with a cached lvreg; capacity 1024 is
 * guarded by g_assert (lvregs_len < 1024) below. */
11497 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11501 * These arrays contain the first and last instructions accessing a given
11503 * Since we emit bblocks in the same order we process them here, and we
11504 * don't split live ranges, these will precisely describe the live range of
11505 * the variable, i.e. the instruction range where a valid value can be found
11506 * in the variables location.
11507 * The live range is computed using the liveness info computed by the liveness pass.
11508 * We can't use vmv->range, since that is an abstract live range, and we need
11509 * one which is instruction precise.
11510 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11512 /* FIXME: Only do this if debugging info is requested */
11513 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11514 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11515 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11516 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11518 /* Add spill loads/stores */
11519 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11522 if (cfg->verbose_level > 2)
11523 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are only valid within a single bblock. */
11525 /* Clear vreg_to_lvreg array */
11526 for (i = 0; i < lvregs_len; i++)
11527 vreg_to_lvreg [lvregs [i]] = 0;
11531 MONO_BB_FOR_EACH_INS (bb, ins) {
11532 const char *spec = INS_INFO (ins->opcode);
11533 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11534 gboolean store, no_lvreg;
11535 int sregs [MONO_MAX_SRC_REGS];
11537 if (G_UNLIKELY (cfg->verbose_level > 2))
11538 mono_print_ins (ins);
11540 if (ins->opcode == OP_NOP)
11544 * We handle LDADDR here as well, since it can only be decomposed
11545 * when variable addresses are known.
11547 if (ins->opcode == OP_LDADDR) {
11548 MonoInst *var = ins->inst_p0;
11550 if (var->opcode == OP_VTARG_ADDR) {
11551 /* Happens on SPARC/S390 where vtypes are passed by reference */
11552 MonoInst *vtaddr = var->inst_left;
11553 if (vtaddr->opcode == OP_REGVAR) {
11554 ins->opcode = OP_MOVE;
11555 ins->sreg1 = vtaddr->dreg;
/* NOTE(review): this branch tests var->inst_left->opcode while the
 * body reads the equivalent vtaddr -- same object, mixed spelling. */
11557 else if (var->inst_left->opcode == OP_REGOFFSET) {
11558 ins->opcode = OP_LOAD_MEMBASE;
11559 ins->inst_basereg = vtaddr->inst_basereg;
11560 ins->inst_offset = vtaddr->inst_offset;
11564 g_assert (var->opcode == OP_REGOFFSET);
/* Address of a stack slot: base register + constant offset. */
11566 ins->opcode = OP_ADD_IMM;
11567 ins->sreg1 = var->inst_basereg;
11568 ins->inst_imm = var->inst_offset;
11571 *need_local_opts = TRUE;
11572 spec = INS_INFO (ins->opcode);
/* CIL-level opcodes must all have been lowered by now. */
11575 if (ins->opcode < MONO_CEE_LAST) {
11576 mono_print_ins (ins);
11577 g_assert_not_reached ();
11581 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ... extra source register, so swap dreg/sreg2 and use a source-only
 * spec (spec2) while processing, then swap back below. */
11585 if (MONO_IS_STORE_MEMBASE (ins)) {
11586 tmp_reg = ins->dreg;
11587 ins->dreg = ins->sreg2;
11588 ins->sreg2 = tmp_reg;
11591 spec2 [MONO_INST_DEST] = ' ';
11592 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11593 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11594 spec2 [MONO_INST_SRC3] = ' ';
11596 } else if (MONO_IS_STORE_MEMINDEX (ins))
11597 g_assert_not_reached ();
11602 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11603 printf ("\t %.3s %d", spec, ins->dreg);
11604 num_sregs = mono_inst_get_src_registers (ins, sregs);
11605 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
11606 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
11613 regtype = spec [MONO_INST_DEST];
11614 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11617 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11618 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11619 MonoInst *store_ins;
11621 MonoInst *def_ins = ins;
11622 int dreg = ins->dreg; /* The original vreg */
11624 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11626 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just substitute it. */
11627 ins->dreg = var->dreg;
11628 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11630 * Instead of emitting a load+store, use a _membase opcode.
11632 g_assert (var->opcode == OP_REGOFFSET);
11633 if (ins->opcode == OP_MOVE) {
11637 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11638 ins->inst_basereg = var->inst_basereg;
11639 ins->inst_offset = var->inst_offset;
11642 spec = INS_INFO (ins->opcode);
11646 g_assert (var->opcode == OP_REGOFFSET);
11648 prev_dreg = ins->dreg;
11650 /* Invalidate any previous lvreg for this vreg */
11651 vreg_to_lvreg [ins->dreg] = 0;
11655 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float keeps doubles in integer registers. */
11657 store_opcode = OP_STOREI8_MEMBASE_REG;
11660 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11662 if (regtype == 'l') {
/* 32-bit target: store the two word halves separately. */
11663 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11664 mono_bblock_insert_after_ins (bb, ins, store_ins);
11665 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11666 mono_bblock_insert_after_ins (bb, ins, store_ins);
11667 def_ins = store_ins;
11670 g_assert (store_opcode != OP_STOREV_MEMBASE);
11672 /* Try to fuse the store into the instruction itself */
11673 /* FIXME: Add more instructions */
11674 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11675 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11676 ins->inst_imm = ins->inst_c0;
11677 ins->inst_destbasereg = var->inst_basereg;
11678 ins->inst_offset = var->inst_offset;
11679 spec = INS_INFO (ins->opcode);
11680 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into the variable becomes a direct store of the source. */
11681 ins->opcode = store_opcode;
11682 ins->inst_destbasereg = var->inst_basereg;
11683 ins->inst_offset = var->inst_offset;
/* The instruction is now a store: redo the dreg/sreg2 swap
 * and source-only spec, as for other MEMBASE stores above. */
11687 tmp_reg = ins->dreg;
11688 ins->dreg = ins->sreg2;
11689 ins->sreg2 = tmp_reg;
11692 spec2 [MONO_INST_DEST] = ' ';
11693 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11694 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11695 spec2 [MONO_INST_SRC3] = ' ';
11697 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11698 // FIXME: The backends expect the base reg to be in inst_basereg
11699 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11701 ins->inst_basereg = var->inst_basereg;
11702 ins->inst_offset = var->inst_offset;
11703 spec = INS_INFO (ins->opcode);
11705 /* printf ("INS: "); mono_print_ins (ins); */
11706 /* Create a store instruction */
11707 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11709 /* Insert it after the instruction */
11710 mono_bblock_insert_after_ins (bb, ins, store_ins);
11712 def_ins = store_ins;
11715 * We can't assign ins->dreg to var->dreg here, since the
11716 * sregs could use it. So set a flag, and do it after
/* ... the sregs have been processed (see dest_has_lvreg below). */
11719 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11720 dest_has_lvreg = TRUE;
11725 if (def_ins && !live_range_start [dreg]) {
11726 live_range_start [dreg] = def_ins;
11727 live_range_start_bb [dreg] = bb;
11730 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
/* Tell the GC-map pass where this slot becomes live. */
11733 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11734 tmp->inst_c1 = dreg;
11735 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/***************/
/*    SREGS    */
/***************/
11742 num_sregs = mono_inst_get_src_registers (ins, sregs);
11743 for (srcindex = 0; srcindex < 3; ++srcindex) {
11744 regtype = spec [MONO_INST_SRC1 + srcindex];
11745 sreg = sregs [srcindex];
11747 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11748 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11749 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11750 MonoInst *use_ins = ins;
11751 MonoInst *load_ins;
11752 guint32 load_opcode;
11754 if (var->opcode == OP_REGVAR) {
11755 sregs [srcindex] = var->dreg;
11756 //mono_inst_set_src_registers (ins, sregs);
11757 live_range_end [sreg] = use_ins;
11758 live_range_end_bb [sreg] = bb;
11760 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11763 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11764 /* var->dreg is a hreg */
11765 tmp->inst_c1 = sreg;
11766 mono_bblock_insert_after_ins (bb, ins, tmp);
11772 g_assert (var->opcode == OP_REGOFFSET);
11774 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11776 g_assert (load_opcode != OP_LOADV_MEMBASE);
11778 if (vreg_to_lvreg [sreg]) {
11779 g_assert (vreg_to_lvreg [sreg] != -1);
11781 /* The variable is already loaded to an lvreg */
11782 if (G_UNLIKELY (cfg->verbose_level > 2))
11783 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11784 sregs [srcindex] = vreg_to_lvreg [sreg];
11785 //mono_inst_set_src_registers (ins, sregs);
11789 /* Try to fuse the load into the instruction */
11790 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11791 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11792 sregs [0] = var->inst_basereg;
11793 //mono_inst_set_src_registers (ins, sregs);
11794 ins->inst_offset = var->inst_offset;
11795 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11796 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11797 sregs [1] = var->inst_basereg;
11798 //mono_inst_set_src_registers (ins, sregs);
11799 ins->inst_offset = var->inst_offset;
11801 if (MONO_IS_REAL_MOVE (ins)) {
/* The load below makes the move redundant. */
11802 ins->opcode = OP_NOP;
11805 //printf ("%d ", srcindex); mono_print_ins (ins);
11807 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11809 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11810 if (var->dreg == prev_dreg) {
11812 * sreg refers to the value loaded by the load
11813 * emitted below, but we need to use ins->dreg
11814 * since it refers to the store emitted earlier.
11818 g_assert (sreg != -1);
/* Cache the lvreg so later uses in this bblock skip the load. */
11819 vreg_to_lvreg [var->dreg] = sreg;
11820 g_assert (lvregs_len < 1024);
11821 lvregs [lvregs_len ++] = var->dreg;
11825 sregs [srcindex] = sreg;
11826 //mono_inst_set_src_registers (ins, sregs);
11828 if (regtype == 'l') {
/* 32-bit target: load high then low word before the use. */
11829 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11830 mono_bblock_insert_before_ins (bb, ins, load_ins);
11831 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11832 mono_bblock_insert_before_ins (bb, ins, load_ins);
11833 use_ins = load_ins;
11836 #if SIZEOF_REGISTER == 4
11837 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11839 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11840 mono_bblock_insert_before_ins (bb, ins, load_ins);
11841 use_ins = load_ins;
11845 if (var->dreg < orig_next_vreg) {
11846 live_range_end [var->dreg] = use_ins;
11847 live_range_end_bb [var->dreg] = bb;
11850 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11853 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11854 tmp->inst_c1 = var->dreg;
11855 mono_bblock_insert_after_ins (bb, ins, tmp);
11859 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above: now that the sregs no longer
 * need the old dreg, record its lvreg. */
11861 if (dest_has_lvreg) {
11862 g_assert (ins->dreg != -1);
11863 vreg_to_lvreg [prev_dreg] = ins->dreg;
11864 g_assert (lvregs_len < 1024);
11865 lvregs [lvregs_len ++] = prev_dreg;
11866 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for MEMBASE stores. */
11870 tmp_reg = ins->dreg;
11871 ins->dreg = ins->sreg2;
11872 ins->sreg2 = tmp_reg;
11875 if (MONO_IS_CALL (ins)) {
/* A call clobbers the scratch registers, so all cached lvregs die. */
11876 /* Clear vreg_to_lvreg array */
11877 for (i = 0; i < lvregs_len; i++)
11878 vreg_to_lvreg [lvregs [i]] = 0;
11880 } else if (ins->opcode == OP_NOP) {
11882 MONO_INST_NULLIFY_SREGS (ins);
11885 if (cfg->verbose_level > 2)
11886 mono_print_ins_index (1, ins);
11889 /* Extend the live range based on the liveness info */
11890 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11891 for (i = 0; i < cfg->num_varinfo; i ++) {
11892 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11894 if (vreg_is_volatile (cfg, vi->vreg))
11895 /* The liveness info is incomplete */
11898 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11899 /* Live from at least the first ins of this bb */
11900 live_range_start [vi->vreg] = bb->code;
11901 live_range_start_bb [vi->vreg] = bb;
11904 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11905 /* Live at least until the last ins of this bb */
11906 live_range_end [vi->vreg] = bb->last_ins;
11907 live_range_end_bb [vi->vreg] = bb;
11913 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11915 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11916 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11918 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11919 for (i = 0; i < cfg->num_varinfo; ++i) {
11920 int vreg = MONO_VARINFO (cfg, i)->vreg;
11923 if (live_range_start [vreg]) {
11924 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11926 ins->inst_c1 = vreg;
11927 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11929 if (live_range_end [vreg]) {
11930 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11932 ins->inst_c1 = vreg;
11933 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11934 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11936 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live-range arrays are heap-allocated (g_new0/g_new), not
 * mempool-allocated, so they must be freed here. */
11942 g_free (live_range_start);
11943 g_free (live_range_end);
11944 g_free (live_range_start_bb);
11945 g_free (live_range_end_bb);
11950 * - use 'iadd' instead of 'int_add'
11951 * - handling ovf opcodes: decompose in method_to_ir.
11952 * - unify iregs/fregs
11953 * -> partly done, the missing parts are:
11954 * - a more complete unification would involve unifying the hregs as well, so
11955 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11956 * would no longer map to the machine hregs, so the code generators would need to
11957 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11958 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11959 * fp/non-fp branches speeds it up by about 15%.
11960 * - use sext/zext opcodes instead of shifts
11962 * - get rid of TEMPLOADs if possible and use vregs instead
11963 * - clean up usage of OP_P/OP_ opcodes
11964 * - cleanup usage of DUMMY_USE
11965 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11967 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11968 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11969 * - make sure handle_stack_args () is called before the branch is emitted
11970 * - when the new IR is done, get rid of all unused stuff
11971 * - COMPARE/BEQ as separate instructions or unify them ?
11972 * - keeping them separate allows specialized compare instructions like
11973 * compare_imm, compare_membase
11974 * - most back ends unify fp compare+branch, fp compare+ceq
11975 * - integrate mono_save_args into inline_method
11976 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11977 * - handle long shift opts on 32 bit platforms somehow: they require
11978 * 3 sregs (2 for arg1 and 1 for arg2)
11979 * - make byref a 'normal' type.
11980 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11981 * variable if needed.
11982 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11983 * like inline_method.
11984 * - remove inlining restrictions
11985 * - fix LNEG and enable cfold of INEG
11986 * - generalize x86 optimizations like ldelema as a peephole optimization
11987 * - add store_mem_imm for amd64
11988 * - optimize the loading of the interruption flag in the managed->native wrappers
11989 * - avoid special handling of OP_NOP in passes
11990 * - move code inserting instructions into one function/macro.
11991 * - try a coalescing phase after liveness analysis
11992 * - add float -> vreg conversion + local optimizations on !x86
11993 * - figure out how to handle decomposed branches during optimizations, ie.
11994 * compare+branch, op_jump_table+op_br etc.
11995 * - promote RuntimeXHandles to vregs
11996 * - vtype cleanups:
11997 * - add a NEW_VARLOADA_VREG macro
11998 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11999 * accessing vtype fields.
12000 * - get rid of I8CONST on 64 bit platforms
12001 * - dealing with the increase in code size due to branches created during opcode
12003 * - use extended basic blocks
12004 * - all parts of the JIT
12005 * - handle_global_vregs () && local regalloc
12006 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12007 * - sources of increase in code size:
12010 * - isinst and castclass
12011 * - lvregs not allocated to global registers even if used multiple times
12012 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12014 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12015 * - add all micro optimizations from the old JIT
12016 * - put tree optimizations into the deadce pass
12017 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12018 * specific function.
12019 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12020 * fcompare + branchCC.
12021 * - create a helper function for allocating a stack slot, taking into account
12022 * MONO_CFG_HAS_SPILLUP.
12024 * - merge the ia64 switch changes.
12025 * - optimize mono_regstate2_alloc_int/float.
12026 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12027 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12028 * parts of the tree could be separated by other instructions, killing the tree
12029 * arguments, or stores killing loads etc. Also, should we fold loads into other
12030 * instructions if the result of the load is used multiple times ?
12031 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12032 * - LAST MERGE: 108395.
12033 * - when returning vtypes in registers, generate IR and append it to the end of the
12034 * last bb instead of doing it in the epilog.
12035 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12043 - When to decompose opcodes:
12044 - earlier: this makes some optimizations hard to implement, since the low level IR
12045 no longer contains the necessary information. But it is easier to do.
12046 - later: harder to implement, enables more optimizations.
12047 - Branches inside bblocks:
12048 - created when decomposing complex opcodes.
12049 - branches to another bblock: harmless, but not tracked by the branch
12050 optimizations, so need to branch to a label at the start of the bblock.
12051 - branches to inside the same bblock: very problematic, trips up the local
12052 reg allocator. Can be fixed by splitting the current bblock, but that is a
12053 complex operation, since some local vregs can become global vregs etc.
12054 - Local/global vregs:
12055 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12056 local register allocator.
12057 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12058 structure, created by mono_create_var (). Assigned to hregs or the stack by
12059 the global register allocator.
12060 - When to do optimizations like alu->alu_imm:
12061 - earlier -> saves work later on since the IR will be smaller/simpler
12062 - later -> can work on more instructions
12063 - Handling of valuetypes:
12064 - When a vtype is pushed on the stack, a new temporary is created, an
12065 instruction computing its address (LDADDR) is emitted and pushed on
12066 the stack. Need to optimize cases when the vtype is used immediately as in
12067 argument passing, stloc etc.
12068 - Instead of the to_end stuff in the old JIT, simply call the function handling
12069 the values on the stack before emitting the last instruction of the bb.
12072 #endif /* DISABLE_JIT */