2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
24 #ifdef HAVE_SYS_TIME_H
32 #include <mono/utils/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/attrdefs.h>
36 #include <mono/metadata/loader.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/class.h>
39 #include <mono/metadata/object.h>
40 #include <mono/metadata/exception.h>
41 #include <mono/metadata/opcodes.h>
42 #include <mono/metadata/mono-endian.h>
43 #include <mono/metadata/tokentype.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/marshal.h>
46 #include <mono/metadata/debug-helpers.h>
47 #include <mono/metadata/mono-debug.h>
48 #include <mono/metadata/gc-internal.h>
49 #include <mono/metadata/security-manager.h>
50 #include <mono/metadata/threads-types.h>
51 #include <mono/metadata/security-core-clr.h>
52 #include <mono/metadata/monitor.h>
53 #include <mono/metadata/profiler-private.h>
54 #include <mono/metadata/profiler.h>
55 #include <mono/utils/mono-compiler.h>
56 #include <mono/utils/mono-memory-model.h>
57 #include <mono/metadata/mono-basic-block.h>
64 #include "jit-icalls.h"
66 #include "debugger-agent.h"
/*
 * NOTE(review): this extract appears decimated -- the embedded original line
 * numbers jump (68, 69, 70, 71, 74, ...), so the macro bodies below are
 * missing lines (e.g. their closing "} while (0)").  Comments describe only
 * what the visible lines show; do not treat these macros as complete.
 */
68 #define BRANCH_COST 10
/* Inline size threshold; presumably measured in IL bytes -- TODO confirm upstream. */
69 #define INLINE_LENGTH_LIMIT 20
/*
 * The *_FAILURE macros below record a MonoExceptionType on the MonoCompile
 * (cfg) and jump to an "exception_exit" label, which must exist in every
 * function that uses them.  They also build a human-readable message with
 * g_strdup_printf and free the temporary name strings.
 */
70 #define INLINE_FAILURE do {\
71 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
74 #define CHECK_CFG_EXCEPTION do {\
75 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
78 #define METHOD_ACCESS_FAILURE do { \
79 char *method_fname = mono_method_full_name (method, TRUE); \
80 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
81 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
82 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
83 g_free (method_fname); \
84 g_free (cil_method_fname); \
85 goto exception_exit; \
87 #define FIELD_ACCESS_FAILURE do { \
88 char *method_fname = mono_method_full_name (method, TRUE); \
89 char *field_fname = mono_field_full_name (field); \
90 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
91 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
92 g_free (method_fname); \
93 g_free (field_fname); \
94 goto exception_exit; \
96 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 if (cfg->generic_sharing_context) { \
98 if (cfg->verbose_level > 2) \
99 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
100 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
101 goto exception_exit; \
104 #define OUT_OF_MEMORY_FAILURE do { \
105 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
106 goto exception_exit; \
108 /* Determine whenever 'ins' represents a load of the 'this' argument */
109 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: map CIL ldind.*/stind.* opcodes to *_MEMBASE IR opcodes. */
111 static int ldind_to_load_membase (int opcode);
112 static int stind_to_store_membase (int opcode);
/* Map a three-address opcode to its immediate-operand form (defined elsewhere). */
114 int mono_op_to_op_imm (int opcode);
115 int mono_op_to_op_imm_noemul (int opcode);
/* IR-emission helpers shared with other mini translation units. */
117 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
118 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
119 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
121 /* helper methods signatures */
/*
 * Lazily-built MonoMethodSignature singletons for runtime trampolines.
 * They are populated in mono_create_helper_signatures () below via
 * mono_create_icall_signature ().
 */
122 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
123 static MonoMethodSignature *helper_sig_domain_get = NULL;
124 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
126 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
127 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
128 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
131 * Instruction metadata
/*
 * X-macro expansions over "mini-ops.h": MINI_OP/MINI_OP3 are redefined
 * before each include to emit a different per-opcode datum.  The first
 * expansion (below) emits dest/src register-kind characters; the second
 * emits source-register counts into ins_sreg_counts.
 * NOTE(review): the array declaration for the first expansion is on a
 * missing line -- see the decimation note at the top of this extract.
 */
139 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
140 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
146 #if SIZEOF_REGISTER == 8
151 /* keep in sync with the enum in mini.h */
154 #include "mini-ops.h"
/* Second expansion: number of source registers actually used by each opcode. */
159 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
160 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
162 * This should contain the index of the last sreg + 1. This is not the same
163 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
165 const gint8 ins_sreg_counts[] = {
166 #include "mini-ops.h"
/*
 * MONO_INIT_VARINFO: initialize liveness bookkeeping on a MonoMethodVar.
 * NOTE(review): body truncated in this extract (trailing lines missing).
 *
 * The mono_alloc_* / mono_inst_set_src_registers wrappers below are thin
 * public shims over the static alloc_* helpers; their return-type lines and
 * braces fall on missing lines of this decimated extract.
 */
171 #define MONO_INIT_VARINFO(vi,id) do { \
172 (vi)->range.first_use.pos.bid = 0xffff; \
178 mono_inst_set_src_registers (MonoInst *ins, int *regs)
/* Copies up to three source registers from REGS into INS. */
180 ins->sreg1 = regs [0];
181 ins->sreg2 = regs [1];
182 ins->sreg3 = regs [2];
/* Allocate a plain integer vreg. */
186 mono_alloc_ireg (MonoCompile *cfg)
188 return alloc_ireg (cfg);
/* Allocate a floating-point vreg. */
192 mono_alloc_freg (MonoCompile *cfg)
194 return alloc_freg (cfg);
/* Allocate a pointer-sized vreg. */
198 mono_alloc_preg (MonoCompile *cfg)
200 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
204 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
206 return alloc_dreg (cfg, stack_type);
210 * mono_alloc_ireg_ref:
212 * Allocate an IREG, and mark it as holding a GC ref.
215 mono_alloc_ireg_ref (MonoCompile *cfg)
217 return alloc_ireg_ref (cfg);
221 * mono_alloc_ireg_mp:
223 * Allocate an IREG, and mark it as holding a managed pointer.
226 mono_alloc_ireg_mp (MonoCompile *cfg)
228 return alloc_ireg_mp (cfg);
232 * mono_alloc_ireg_copy:
234 * Allocate an IREG with the same GC type as VREG.
237 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking kind (ref / managed pointer / plain) of VREG. */
239 if (vreg_is_ref (cfg, vreg))
240 return alloc_ireg_ref (cfg);
241 else if (vreg_is_mp (cfg, vreg))
242 return alloc_ireg_mp (cfg);
244 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove: pick the register-move opcode for a value of TYPE.
 * NOTE(review): decimated -- the return statements between the case labels
 * fall on missing lines; only the case structure is visible here.
 */
248 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
254 switch (type->type) {
257 case MONO_TYPE_BOOLEAN:
269 case MONO_TYPE_FNPTR:
271 case MONO_TYPE_CLASS:
272 case MONO_TYPE_STRING:
273 case MONO_TYPE_OBJECT:
274 case MONO_TYPE_SZARRAY:
275 case MONO_TYPE_ARRAY:
279 #if SIZEOF_REGISTER == 8
288 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
289 if (type->data.klass->enumtype) {
290 type = mono_class_enum_basetype (type->data.klass);
293 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
296 case MONO_TYPE_TYPEDBYREF:
298 case MONO_TYPE_GENERICINST:
/* Generic instantiations defer to the open container class's type. */
299 type = &type->data.generic_class->container_class->byval_arg;
303 g_assert (cfg->generic_sharing_context);
306 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb: debug dump of one basic block -- its in/out edges
 * (block numbers with depth-first numbers) followed by its instructions.
 */
312 mono_print_bb (MonoBasicBlock *bb, const char *msg)
317 printf ("\n%s %d: [IN: ", msg, bb->block_num);
318 for (i = 0; i < bb->in_count; ++i)
319 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
321 for (i = 0; i < bb->out_count; ++i)
322 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
324 for (tree = bb->code; tree; tree = tree->next)
325 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures: build the trampoline signature singletons
 * declared above.  Signature strings are "<ret> <args...>" icall descriptors.
 */
329 mono_create_helper_signatures (void)
331 helper_sig_domain_get = mono_create_icall_signature ("ptr");
332 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
333 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
334 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
335 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
336 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
337 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
341 * Can't put this at the beginning, since other files reference stuff from this
/* Bail-out macros: break under the debugger if requested, else jump to the
 * corresponding label ("unverified" / "load_error") in the using function. */
346 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
348 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* GET_BBLOCK: look up (or lazily create and register) the basic block that
 * starts at CIL offset IP.  NOTE(review): body truncated in this extract. */
350 #define GET_BBLOCK(cfg,tblock,ip) do { \
351 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
353 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
354 NEW_BBLOCK (cfg, (tblock)); \
355 (tblock)->cil_code = (ip); \
356 ADD_BBLOCK (cfg, (tblock)); \
360 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm. */
361 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
362 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
363 (dest)->dreg = alloc_ireg_mp ((cfg)); \
364 (dest)->sreg1 = (sr1); \
365 (dest)->sreg2 = (sr2); \
366 (dest)->inst_imm = (imm); \
367 (dest)->backend.shift_amount = (shift); \
368 MONO_ADD_INS ((cfg)->cbb, (dest)); \
372 #if SIZEOF_REGISTER == 8
/* On 64-bit, widen an I4 operand with OP_SEXT_I4 when mixed with a pointer;
 * on 32-bit (the #else arm below) the macro is a no-op. */
373 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
374 /* FIXME: Need to add many more cases */ \
375 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
377 int dr = alloc_preg (cfg); \
378 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
379 (ins)->sreg2 = widen->dreg; \
383 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* ADD_BINOP / ADD_UNOP / ADD_BINCOND: pop operands from the eval stack (sp),
 * type them via type_from_op (), allocate a dreg, append the instruction to
 * the current bblock, and push the (possibly decomposed) result.
 * ADD_BINCOND additionally wires the true/false branch targets and may flush
 * the stack with handle_stack_args ().
 * NOTE(review): bodies are truncated in this extract. */
386 #define ADD_BINOP(op) do { \
387 MONO_INST_NEW (cfg, ins, (op)); \
389 ins->sreg1 = sp [0]->dreg; \
390 ins->sreg2 = sp [1]->dreg; \
391 type_from_op (ins, sp [0], sp [1]); \
393 /* Have to insert a widening op */ \
394 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
395 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
396 MONO_ADD_INS ((cfg)->cbb, (ins)); \
397 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
400 #define ADD_UNOP(op) do { \
401 MONO_INST_NEW (cfg, ins, (op)); \
403 ins->sreg1 = sp [0]->dreg; \
404 type_from_op (ins, sp [0], NULL); \
406 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
407 MONO_ADD_INS ((cfg)->cbb, (ins)); \
408 *sp++ = mono_decompose_opcode (cfg, ins); \
411 #define ADD_BINCOND(next_block) do { \
414 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
415 cmp->sreg1 = sp [0]->dreg; \
416 cmp->sreg2 = sp [1]->dreg; \
417 type_from_op (cmp, sp [0], sp [1]); \
419 type_from_op (ins, sp [0], sp [1]); \
420 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
421 GET_BBLOCK (cfg, tblock, target); \
422 link_bblock (cfg, bblock, tblock); \
423 ins->inst_true_bb = tblock; \
424 if ((next_block)) { \
425 link_bblock (cfg, bblock, (next_block)); \
426 ins->inst_false_bb = (next_block); \
427 start_new_bblock = 1; \
429 GET_BBLOCK (cfg, tblock, ip); \
430 link_bblock (cfg, bblock, tblock); \
431 ins->inst_false_bb = tblock; \
432 start_new_bblock = 2; \
434 if (sp != stack_start) { \
435 handle_stack_args (cfg, stack_start, sp - stack_start); \
436 CHECK_UNVERIFIABLE (cfg); \
438 MONO_ADD_INS (bblock, cmp); \
439 MONO_ADD_INS (bblock, ins); \
443 * link_bblock: Links two basic blocks
445 * links two basic blocks in the control flow graph, the 'from'
446 * argument is the starting block and the 'to' argument is the block
447 * the control flow ends to after 'from'.
/* NOTE(review): decimated -- braces and several statements (verbose-dump
 * guards, duplicate-edge early returns, the appends of the new edge) fall on
 * missing lines.  Visible logic: optionally trace the edge, skip if already
 * linked, then grow both from->out_bb and to->in_bb by one using
 * mempool-allocated copies of the old arrays. */
450 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
452 MonoBasicBlock **newa;
456 if (from->cil_code) {
458 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
460 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
463 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
465 printf ("edge from entry to exit\n");
/* Check whether the edge already exists before adding it. */
470 for (i = 0; i < from->out_count; ++i) {
471 if (to == from->out_bb [i]) {
477 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
478 for (i = 0; i < from->out_count; ++i) {
479 newa [i] = from->out_bb [i];
/* Same dance for the reverse (in) edge list on 'to'. */
487 for (i = 0; i < to->in_count; ++i) {
488 if (from == to->in_bb [i]) {
494 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
495 for (i = 0; i < to->in_count; ++i) {
496 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
505 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
507 link_bblock (cfg, from, to);
511 * mono_find_block_region:
513 * We mark each basic block with a region ID. We use that to avoid BB
514 * optimizations when blocks are in different regions.
517 * A region token that encodes where this region is, and information
518 * about the clause owner for this block.
520 * The region encodes the try/catch/filter clause that owns this block
521 * as well as the type. -1 is a special value that represents a block
522 * that is in none of try/catch/filter.
525 mono_find_block_region (MonoCompile *cfg, int offset)
527 MonoMethodHeader *header = cfg->header;
528 MonoExceptionClause *clause;
/* Scan the method's EH clauses; encode as ((clause_index+1) << 8) | kind | flags. */
531 for (i = 0; i < header->num_clauses; ++i) {
532 clause = &header->clauses [i];
533 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
534 (offset < (clause->handler_offset)))
535 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
537 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
538 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
539 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
540 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
541 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
543 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
546 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
547 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block: collect the clauses of kind TYPE (e.g. finally)
 * that are exited when control transfers from IP to TARGET.
 * NOTE(review): truncated -- result-list setup/return lines are missing.
 */
554 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
556 MonoMethodHeader *header = cfg->header;
557 MonoExceptionClause *clause;
561 for (i = 0; i < header->num_clauses; ++i) {
562 clause = &header->clauses [i];
563 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
564 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
565 if (clause->flags == type)
566 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region: get-or-create the per-region stack-pointer
 * variable, cached in cfg->spvars keyed by region id.
 */
573 mono_create_spvar_for_region (MonoCompile *cfg, int region)
577 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
581 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
582 /* prevent it from being register allocated */
583 var->flags |= MONO_INST_INDIRECT;
585 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (but do not create) the exception variable for a handler offset. */
589 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
591 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception object variable for a handler offset (cfg->exvars). */
595 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
599 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
603 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
604 /* prevent it from being register allocated */
605 var->flags |= MONO_INST_INDIRECT;
607 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
613 * Returns the type used in the eval stack when @type is loaded.
614 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* NOTE(review): decimated -- several case labels and the byref early-out are
 * on missing lines.  Sets inst->type (STACK_*) and inst->klass from TYPE. */
617 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
621 inst->klass = klass = mono_class_from_mono_type (type);
623 inst->type = STACK_MP;
628 switch (type->type) {
630 inst->type = STACK_INV;
634 case MONO_TYPE_BOOLEAN:
640 inst->type = STACK_I4;
645 case MONO_TYPE_FNPTR:
646 inst->type = STACK_PTR;
648 case MONO_TYPE_CLASS:
649 case MONO_TYPE_STRING:
650 case MONO_TYPE_OBJECT:
651 case MONO_TYPE_SZARRAY:
652 case MONO_TYPE_ARRAY:
653 inst->type = STACK_OBJ;
657 inst->type = STACK_I8;
661 inst->type = STACK_R8;
663 case MONO_TYPE_VALUETYPE:
/* Enums evaluate as their underlying integral type. */
664 if (type->data.klass->enumtype) {
665 type = mono_class_enum_basetype (type->data.klass);
669 inst->type = STACK_VTYPE;
672 case MONO_TYPE_TYPEDBYREF:
673 inst->klass = mono_defaults.typed_reference_class;
674 inst->type = STACK_VTYPE;
676 case MONO_TYPE_GENERICINST:
677 type = &type->data.generic_class->container_class->byval_arg;
680 case MONO_TYPE_MVAR :
681 /* FIXME: all the arguments must be references for now,
682 * later look inside cfg and see if the arg num is
685 g_assert (cfg->generic_sharing_context);
686 inst->type = STACK_OBJ;
689 g_error ("unknown type 0x%02x in eval stack type", type->type);
694 * The following tables are used to quickly validate the IL code in type_from_op ().
/* bin_num_table[a][b]: result STACK_* type of a numeric binop on operand
 * types a and b; STACK_INV marks invalid IL combinations.
 * Row/column order is the STACK_* enum (Inv, I4, I8, PTR, R8, MP, OBJ, VT). */
697 bin_num_table [STACK_MAX] [STACK_MAX] = {
698 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand STACK_* type (declaration line missing). */
710 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
713 /* reduce the size of this table */
/* bin_int_table: like bin_num_table but for integer-only binops (and/or/xor...). */
715 bin_int_table [STACK_MAX] [STACK_MAX] = {
716 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
721 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
722 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
723 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* bin_comp_table: nonzero = comparable; values >1 presumably encode
 * restricted comparisons (e.g. beq/bne only) -- TODO confirm in type_from_op. */
727 bin_comp_table [STACK_MAX] [STACK_MAX] = {
728 /* Inv i L p F & O vt */
730 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
731 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
732 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
733 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
734 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
735 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
736 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
739 /* reduce the size of this table */
/* shift_table[value][amount]: result type of shl/shr/shr.un. */
741 shift_table [STACK_MAX] [STACK_MAX] = {
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
753 * Tables to map from the non-specific opcode to the matching
754 * type-specific opcode.
756 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each *_op_map entry is an opcode DELTA added to the generic CIL opcode,
 * indexed by the operand's STACK_* type. */
758 binops_op_map [STACK_MAX] = {
759 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
762 /* handles from CEE_NEG to CEE_CONV_U8 */
764 unops_op_map [STACK_MAX] = {
765 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
768 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
770 ovfops_op_map [STACK_MAX] = {
771 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
774 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
776 ovf2ops_op_map [STACK_MAX] = {
777 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
780 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
782 ovf3ops_op_map [STACK_MAX] = {
783 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
786 /* handles from CEE_BEQ to CEE_BLT_UN */
788 beqops_op_map [STACK_MAX] = {
789 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
792 /* handles from CEE_CEQ to CEE_CLT_UN */
794 ceqops_op_map [STACK_MAX] = {
795 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
799 * Sets ins->type (the type on the eval stack) according to the
800 * type of the opcode and the arguments to it.
801 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
803 * FIXME: this function sets ins->type unconditionally in some cases, but
804 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): decimated -- many case labels and break statements fall on
 * missing lines, so the grouping of cases below is only partially visible.
 * The pattern throughout: consult the validation tables above for the result
 * STACK_* type, then specialize the generic opcode via the *_op_map deltas. */
807 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
809 switch (ins->opcode) {
816 /* FIXME: check unverifiable args for STACK_MP */
/* Arithmetic binops: result type from bin_num_table, opcode from binops_op_map. */
817 ins->type = bin_num_table [src1->type] [src2->type];
818 ins->opcode += binops_op_map [ins->type];
825 ins->type = bin_int_table [src1->type] [src2->type];
826 ins->opcode += binops_op_map [ins->type];
831 ins->type = shift_table [src1->type] [src2->type];
832 ins->opcode += binops_op_map [ins->type];
/* OP_COMPARE: pick L/F/I compare by the first operand's width/kind. */
837 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
838 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
839 ins->opcode = OP_LCOMPARE;
840 else if (src1->type == STACK_R8)
841 ins->opcode = OP_FCOMPARE;
843 ins->opcode = OP_ICOMPARE;
845 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes bin_comp_table with src1->type twice -- presumably
 * intentional for the immediate form (compare against same-typed imm),
 * but worth confirming against upstream. */
846 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
847 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
848 ins->opcode = OP_LCOMPARE_IMM;
860 ins->opcode += beqops_op_map [src1->type];
863 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
864 ins->opcode += ceqops_op_map [src1->type];
/* Ordered-only comparisons: mask with 1 so the "partial" entries (2,3,4)
 * in bin_comp_table are rejected. */
870 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
871 ins->opcode += ceqops_op_map [src1->type];
875 ins->type = neg_table [src1->type];
876 ins->opcode += unops_op_map [ins->type];
879 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
880 ins->type = src1->type;
882 ins->type = STACK_INV;
883 ins->opcode += unops_op_map [ins->type];
/* conv to small ints: result is I4 regardless of source width. */
889 ins->type = STACK_I4;
890 ins->opcode += unops_op_map [src1->type];
893 ins->type = STACK_R8;
894 switch (src1->type) {
897 ins->opcode = OP_ICONV_TO_R_UN;
900 ins->opcode = OP_LCONV_TO_R_UN;
904 case CEE_CONV_OVF_I1:
905 case CEE_CONV_OVF_U1:
906 case CEE_CONV_OVF_I2:
907 case CEE_CONV_OVF_U2:
908 case CEE_CONV_OVF_I4:
909 case CEE_CONV_OVF_U4:
910 ins->type = STACK_I4;
911 ins->opcode += ovf3ops_op_map [src1->type];
913 case CEE_CONV_OVF_I_UN:
914 case CEE_CONV_OVF_U_UN:
915 ins->type = STACK_PTR;
916 ins->opcode += ovf2ops_op_map [src1->type];
918 case CEE_CONV_OVF_I1_UN:
919 case CEE_CONV_OVF_I2_UN:
920 case CEE_CONV_OVF_I4_UN:
921 case CEE_CONV_OVF_U1_UN:
922 case CEE_CONV_OVF_U2_UN:
923 case CEE_CONV_OVF_U4_UN:
924 ins->type = STACK_I4;
925 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-width unsigned conversion; word size decides the opcode. */
928 ins->type = STACK_PTR;
929 switch (src1->type) {
931 ins->opcode = OP_ICONV_TO_U;
935 #if SIZEOF_VOID_P == 8
936 ins->opcode = OP_LCONV_TO_U;
938 ins->opcode = OP_MOVE;
942 ins->opcode = OP_LCONV_TO_U;
945 ins->opcode = OP_FCONV_TO_U;
951 ins->type = STACK_I8;
952 ins->opcode += unops_op_map [src1->type];
954 case CEE_CONV_OVF_I8:
955 case CEE_CONV_OVF_U8:
956 ins->type = STACK_I8;
957 ins->opcode += ovf3ops_op_map [src1->type];
959 case CEE_CONV_OVF_U8_UN:
960 case CEE_CONV_OVF_I8_UN:
961 ins->type = STACK_I8;
962 ins->opcode += ovf2ops_op_map [src1->type];
966 ins->type = STACK_R8;
967 ins->opcode += unops_op_map [src1->type];
970 ins->type = STACK_R8;
974 ins->type = STACK_I4;
975 ins->opcode += ovfops_op_map [src1->type];
980 ins->type = STACK_PTR;
981 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: no R8 variant exists, so R8 results are invalid. */
989 ins->type = bin_num_table [src1->type] [src2->type];
990 ins->opcode += ovfops_op_map [src1->type];
991 if (ins->type == STACK_R8)
992 ins->type = STACK_INV;
994 case OP_LOAD_MEMBASE:
995 ins->type = STACK_PTR;
997 case OP_LOADI1_MEMBASE:
998 case OP_LOADU1_MEMBASE:
999 case OP_LOADI2_MEMBASE:
1000 case OP_LOADU2_MEMBASE:
1001 case OP_LOADI4_MEMBASE:
1002 case OP_LOADU4_MEMBASE:
1003 ins->type = STACK_PTR;
1005 case OP_LOADI8_MEMBASE:
1006 ins->type = STACK_I8;
1008 case OP_LOADR4_MEMBASE:
1009 case OP_LOADR8_MEMBASE:
1010 ins->type = STACK_R8;
1013 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a placeholder klass. */
1017 if (ins->type == STACK_MP)
1018 ins->klass = mono_defaults.object_class;
/* Map MONO_TYPE_* (row order per the enum -- declaration line missing in this
 * extract) to the STACK_* eval-stack kind. */
1023 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* param_table: apparently unused validation table; initializer rows missing. */
1029 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature: sanity-check that the eval-stack kinds of ARGS
 * match SIG's parameter types (byref-ness, reference kinds, float kinds).
 * NOTE(review): decimated -- return statements and several cases are on
 * missing lines; 'this' parameter handling is not visible.
 */
1034 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1038 switch (args->type) {
1048 for (i = 0; i < sig->param_count; ++i) {
1049 switch (args [i].type) {
1053 if (!sig->params [i]->byref)
1057 if (sig->params [i]->byref)
1059 switch (sig->params [i]->type) {
1060 case MONO_TYPE_CLASS:
1061 case MONO_TYPE_STRING:
1062 case MONO_TYPE_OBJECT:
1063 case MONO_TYPE_SZARRAY:
1064 case MONO_TYPE_ARRAY:
1071 if (sig->params [i]->byref)
1073 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1082 /*if (!param_table [args [i].type] [sig->params [i]->type])
1090 * When we need a pointer to the current domain many times in a method, we
1091 * call mono_domain_get() once and we store the result in a local variable.
1092 * This function returns the variable that represents the MonoDomain*.
1094 inline static MonoInst *
1095 mono_get_domainvar (MonoCompile *cfg)
1097 if (!cfg->domainvar)
1098 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1099 return cfg->domainvar;
1103 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily created; only meaningful on architectures defining MONO_ARCH_NEED_GOT_VAR
 * and when compiling AOT. */
1107 mono_get_got_var (MonoCompile *cfg)
1109 #ifdef MONO_ARCH_NEED_GOT_VAR
1110 if (!cfg->compile_aot)
1112 if (!cfg->got_var) {
1113 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1115 return cfg->got_var;
/* Lazily create the rgctx variable (generic sharing only); kept stack-allocated. */
1122 mono_get_vtable_var (MonoCompile *cfg)
1124 g_assert (cfg->generic_sharing_context);
1126 if (!cfg->rgctx_var) {
1127 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1128 /* force the var to be stack allocated */
1129 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1132 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: STACK_* kind back to a MonoType*. */
1136 type_from_stack_type (MonoInst *ins) {
1137 switch (ins->type) {
1138 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1139 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1140 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1141 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1143 return &ins->klass->this_arg;
1144 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1145 case STACK_VTYPE: return &ins->klass->byval_arg;
1147 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type: MonoType* -> STACK_* eval-stack kind, after stripping
 * enum wrappers via mono_type_get_underlying_type ().
 * NOTE(review): decimated -- the return statements between case labels are
 * on missing lines.
 */
1152 static G_GNUC_UNUSED int
1153 type_to_stack_type (MonoType *t)
1155 t = mono_type_get_underlying_type (t);
1159 case MONO_TYPE_BOOLEAN:
1162 case MONO_TYPE_CHAR:
1169 case MONO_TYPE_FNPTR:
1171 case MONO_TYPE_CLASS:
1172 case MONO_TYPE_STRING:
1173 case MONO_TYPE_OBJECT:
1174 case MONO_TYPE_SZARRAY:
1175 case MONO_TYPE_ARRAY:
1183 case MONO_TYPE_VALUETYPE:
1184 case MONO_TYPE_TYPEDBYREF:
1186 case MONO_TYPE_GENERICINST:
1187 if (mono_type_generic_inst_is_valuetype (t))
1193 g_assert_not_reached ();
/* Element MonoClass* accessed by a CIL ldelem.*/stelem.* opcode. */
1200 array_access_to_klass (int opcode)
1204 return mono_defaults.byte_class;
1206 return mono_defaults.uint16_class;
1209 return mono_defaults.int_class;
1212 return mono_defaults.sbyte_class;
1215 return mono_defaults.int16_class;
1218 return mono_defaults.int32_class;
1220 return mono_defaults.uint32_class;
1223 return mono_defaults.int64_class;
1226 return mono_defaults.single_class;
1229 return mono_defaults.double_class;
1230 case CEE_LDELEM_REF:
1231 case CEE_STELEM_REF:
1232 return mono_defaults.object_class;
1234 g_assert_not_reached ();
1240 * We try to share variables when possible
/* Get-or-create a shared "interface variable" for stack SLOT with INS's type,
 * cached in cfg->intvars keyed by (type, slot).  Slots past max_stack (from
 * inlining) always get a fresh variable. */
1243 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1248 /* inlining can result in deeper stacks */
1249 if (slot >= cfg->header->max_stack)
1250 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1252 pos = ins->type - 1 + slot * STACK_MAX;
1254 switch (ins->type) {
1261 if ((vnum = cfg->intvars [pos]))
1262 return cfg->varinfo [vnum];
1263 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1264 cfg->intvars [pos] = res->inst_c0;
1267 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record image+token for KEY so AOT can resolve the reference later. */
1273 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1276 * Don't use this if a generic_context is set, since that means AOT can't
1277 * look up the method using just the image+token.
1278 * table == 0 means this is a reference made from a wrapper.
1280 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1281 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1282 jump_info_token->image = image;
1283 jump_info_token->token = token;
1284 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1289 * This function is called to handle items that are left on the evaluation stack
1290 * at basic block boundaries. What happens is that we save the values to local variables
1291 * and we reload them later when first entering the target basic block (with the
1292 * handle_loaded_temps () function).
1293 * A single joint point will use the same variables (stored in the array bb->out_stack or
1294 * bb->in_stack, if the basic block is before or after the joint point).
1296 * This function needs to be called _before_ emitting the last instruction of
1297 * the bb (i.e. before emitting a branch).
1298 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* NOTE(review): decimated -- braces, 'continue's and a few statements fall on
 * missing lines; the three phases visible are: (1) build/reuse bb->out_stack,
 * (2) propagate it as in_stack to successors (flagging unverifiable on scount
 * mismatch), (3) emit temp stores, repeated for successors whose in_stack
 * differs from the one stored to first. */
1301 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1304 MonoBasicBlock *bb = cfg->cbb;
1305 MonoBasicBlock *outb;
1306 MonoInst *inst, **locals;
1311 if (cfg->verbose_level > 3)
1312 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1313 if (!bb->out_scount) {
1314 bb->out_scount = count;
1315 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's existing in_stack as our out_stack. */
1317 for (i = 0; i < bb->out_count; ++i) {
1318 outb = bb->out_bb [i];
1319 /* exception handlers are linked, but they should not be considered for stack args */
1320 if (outb->flags & BB_EXCEPTION_HANDLER)
1322 //printf (" %d", outb->block_num);
1323 if (outb->in_stack) {
1325 bb->out_stack = outb->in_stack;
1331 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1332 for (i = 0; i < count; ++i) {
1334 * try to reuse temps already allocated for this purpouse, if they occupy the same
1335 * stack slot and if they are of the same type.
1336 * This won't cause conflicts since if 'local' is used to
1337 * store one of the values in the in_stack of a bblock, then
1338 * the same variable will be used for the same outgoing stack
1340 * This doesn't work when inlining methods, since the bblocks
1341 * in the inlined methods do not inherit their in_stack from
1342 * the bblock they are inlined to. See bug #58863 for an
1345 if (cfg->inlined_method)
1346 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1348 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as each successor's in_stack; a depth mismatch
 * at a join point makes the method unverifiable. */
1353 for (i = 0; i < bb->out_count; ++i) {
1354 outb = bb->out_bb [i];
1355 /* exception handlers are linked, but they should not be considered for stack args */
1356 if (outb->flags & BB_EXCEPTION_HANDLER)
1358 if (outb->in_scount) {
1359 if (outb->in_scount != bb->out_scount) {
1360 cfg->unverifiable = TRUE;
1363 continue; /* check they are the same locals */
1365 outb->in_scount = count;
1366 outb->in_stack = bb->out_stack;
1369 locals = bb->out_stack;
/* Store the live stack values into the shared temps. */
1371 for (i = 0; i < count; ++i) {
1372 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1373 inst->cil_code = sp [i]->cil_code;
1374 sp [i] = locals [i];
1375 if (cfg->verbose_level > 3)
1376 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1380 * It is possible that the out bblocks already have in_stack assigned, and
1381 * the in_stacks differ. In this case, we will store to all the different
1388 /* Find a bblock which has a different in_stack */
1390 while (bindex < bb->out_count) {
1391 outb = bb->out_bb [bindex];
1392 /* exception handlers are linked, but they should not be considered for stack args */
1393 if (outb->flags & BB_EXCEPTION_HANDLER) {
1397 if (outb->in_stack != locals) {
1398 for (i = 0; i < count; ++i) {
1399 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1400 inst->cil_code = sp [i]->cil_code;
1401 sp [i] = locals [i];
1402 if (cfg->verbose_level > 3)
1403 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1405 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 *   Loads interface_offsets [klass->interface_id] into INTF_REG.  The offsets
 *   array sits in memory immediately before the vtable, hence the negative
 *   offset used in the non-AOT path.
 *   NOTE(review): sampled fragment — the return-type line, braces and the
 *   'else' opening are not visible here.
 */
1414 /* Emit code which loads interface_offsets [klass->interface_id]
1415 * The array is stored in memory before vtable.
1418 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1420 if (cfg->compile_aot) {
1421 int ioffset_reg = alloc_preg (cfg);
1422 int iid_reg = alloc_preg (cfg);
/* AOT: the interface id is a patchable constant (already scaled to a byte offset). */
1424 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1425 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emits IR leaving a nonzero value in INTF_BIT_REG iff the interface bitmap
 *   found at BASE_REG + OFFSET has the bit for KLASS's interface id set.
 *   Two variants: a runtime-helper icall when the bitmap is compressed, and an
 *   inline byte load + mask otherwise.
 */
1434 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1436 int ibitmap_reg = alloc_preg (cfg);
1437 #ifdef COMPRESSED_INTERFACE_BITMAP
1439 MonoInst *res, *ins;
1440 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1441 MONO_ADD_INS (cfg->cbb, ins);
/* Compressed bitmap: defer the bit test to the mono_class_interface_match icall. */
1443 if (cfg->compile_aot)
1444 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1446 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1447 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1450 int ibitmap_byte_reg = alloc_preg (cfg);
1452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1454 if (cfg->compile_aot) {
/* AOT: the iid is only known at load time, so compute the byte index (iid >> 3)
   and bit mask (1 << (iid & 7)) in emitted IR. */
1455 int iid_reg = alloc_preg (cfg);
1456 int shifted_iid_reg = alloc_preg (cfg);
1457 int ibitmap_byte_address_reg = alloc_preg (cfg);
1458 int masked_iid_reg = alloc_preg (cfg);
1459 int iid_one_bit_reg = alloc_preg (cfg);
1460 int iid_bit_reg = alloc_preg (cfg);
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1462 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1463 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1466 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1467 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1468 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: the interface id is a compile-time constant, so fold index and mask. */
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1477 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1478 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: run the bitmap check against MonoClass::interface_bitmap. */
1481 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1483 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1487 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1488 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: run the bitmap check against MonoVTable::interface_bitmap. */
1491 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1493 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1497 * Emit code which checks whenever the interface id of @klass is smaller than
1498 * than the value given by max_iid_reg.
/* On failure either branches to FALSE_TARGET (when given) or throws
   InvalidCastException. */
1501 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1502 MonoBasicBlock *false_target)
1504 if (cfg->compile_aot) {
1505 int iid_reg = alloc_preg (cfg);
1506 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1507 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* NOTE(review): fragment — the if/else selecting between the next two lines is
   not visible here. */
1512 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1514 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1517 /* Same as above, but obtains max_iid from a vtable */
1519 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1520 MonoBasicBlock *false_target)
1522 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 2-byte field. */
1524 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1525 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1528 /* Same as above, but obtains max_iid from a klass */
1530 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1531 MonoBasicBlock *false_target)
1533 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 2-byte field. */
1535 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1536 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emits an isinst-style subtype test: compares the supertype of the class in
 *   KLASS_REG at KLASS's idepth against KLASS (or KLASS_INS's register under
 *   generic sharing, or a class constant under AOT), branching to TRUE_TARGET
 *   on a match and to FALSE_TARGET when the candidate's idepth is too small.
 */
1540 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1542 int idepth_reg = alloc_preg (cfg);
1543 int stypes_reg = alloc_preg (cfg);
1544 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes/idepth are initialized before we read them below. */
1546 mono_class_setup_supertypes (klass);
1548 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* Deep hierarchy: first verify the candidate's supertable is long enough. */
1549 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1551 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1553 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* NOTE(review): fragment — the 'if (klass_ins) {' opening this chain is not visible. */
1556 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1557 } else if (cfg->compile_aot) {
1558 int const_reg = alloc_preg (cfg);
1559 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1560 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1564 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with no generic-sharing class instruction. */
1568 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1570 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Checks that the object whose vtable is in VTABLE_REG implements interface
 *   KLASS; branches to TRUE_TARGET on success when targets are supplied,
 *   otherwise throws InvalidCastException.
 */
1574 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1576 int intf_reg = alloc_preg (cfg);
1578 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1579 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1580 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
/* NOTE(review): fragment — the if/else selecting branch vs. exception is not visible. */
1582 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1584 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1588 * Variant of the above that takes a register to the class, not the vtable.
1591 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1593 int intf_bit_reg = alloc_preg (cfg);
1595 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1596 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* NOTE(review): fragment — the if/else selecting branch vs. exception is not visible. */
1599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1601 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emits an exact-class comparison of KLASS_REG against KLASS (or against
 *   KLASS_INST's register under generic sharing, or a class constant under
 *   AOT), throwing InvalidCastException on mismatch.
 */
1605 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
/* NOTE(review): fragment — the 'if (klass_inst) {' opening this chain is not visible. */
1608 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1609 } else if (cfg->compile_aot) {
1610 int const_reg = alloc_preg (cfg);
1611 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1612 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1616 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Exact-class check with no generic-sharing class instruction. */
1620 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1622 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compares KLASS_REG against KLASS (a class constant under AOT) and emits a
 *   conditional branch with BRANCH_OP to TARGET.
 */
1626 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1628 if (cfg->compile_aot) {
1629 int const_reg = alloc_preg (cfg);
1630 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1631 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1633 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1635 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: castclass is mutually recursive with castclass_inst below. */
1639 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emits a full castclass check of the class in KLASS_REG against KLASS,
 *   throwing InvalidCastException on failure.  Array classes compare the rank
 *   and recurse on the element (cast) class; other classes walk the
 *   supertypes table, like the isinst helper above.
 */
1642 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1645 int rank_reg = alloc_preg (cfg);
1646 int eclass_reg = alloc_preg (cfg);
/* NOTE(review): fragment — the guard selecting the array path is not visible here. */
1648 g_assert (!klass_inst);
1649 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1650 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1651 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1652 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes related to System.Enum, mirroring runtime cast rules. */
1654 if (klass->cast_class == mono_defaults.object_class) {
1655 int parent_reg = alloc_preg (cfg);
1656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1657 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1658 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1659 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1660 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1661 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1662 } else if (klass->cast_class == mono_defaults.enum_class) {
1663 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1664 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1665 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1667 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1668 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1671 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1672 /* Check that the object is a vector too */
1673 int bounds_reg = alloc_preg (cfg);
1674 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1676 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: check via the supertypes table. */
1679 int idepth_reg = alloc_preg (cfg);
1680 int stypes_reg = alloc_preg (cfg);
1681 int stype = alloc_preg (cfg);
1683 mono_class_setup_supertypes (klass);
1685 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1688 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1692 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Castclass wrapper with no generic-sharing class instruction. */
1697 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1699 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emits inline stores that set SIZE bytes at DESTREG+OFFSET to VAL (only
 *   val == 0 is supported, see the assert), using the widest store permitted
 *   by ALIGN first and progressively narrower stores for the tail.
 *   NOTE(review): fragment — the switch/loop scaffolding between the stores is
 *   not visible here.
 */
1703 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1707 g_assert (val == 0);
/* Small aligned block: a single store-immediate of the right width. */
1712 if ((size <= 4) && (size <= align)) {
1715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1723 #if SIZEOF_REGISTER == 8
1725 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize val in a register and emit a run of stores. */
1731 val_reg = alloc_preg (cfg);
1733 if (SIZEOF_REGISTER == 8)
1734 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1736 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1739 /* This could be optimized further if neccesary */
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1748 #if !NO_UNALIGNED_ACCESS
1749 if (SIZEOF_REGISTER == 8) {
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1764 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1769 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emits inline load/store pairs copying SIZE bytes from SRCREG+SOFFSET to
 *   DESTREG+DOFFSET, widest width first, then narrower tails.  SIZE is
 *   asserted < 10000 to bound code expansion.
 *   NOTE(review): fragment — the loop/conditional scaffolding around the copy
 *   pairs is not visible here.
 */
1781 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1788 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1789 g_assert (size < 10000);
/* Unaligned: copy one byte at a time. */
1792 /* This could be optimized further if neccesary */
1794 cur_reg = alloc_preg (cfg);
1795 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1796 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1803 #if !NO_UNALIGNED_ACCESS
1804 if (SIZEOF_REGISTER == 8) {
1806 cur_reg = alloc_preg (cfg);
1807 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1808 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1817 cur_reg = alloc_preg (cfg);
1818 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1819 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1825 cur_reg = alloc_preg (cfg);
1826 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1827 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1833 cur_reg = alloc_preg (cfg);
1834 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1835 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Maps a method's return TYPE to the matching call opcode, selected by the
 *   CALLI (indirect) and VIRT flags; GSCTX resolves generic-shared types.
 *   NOTE(review): fragment — several case labels, the byref early-out and a
 *   'goto'/loop-back between the visible lines are not shown.
 */
1843 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1846 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1849 type = mini_get_basic_type_from_generic (gsctx, type);
1850 switch (type->type) {
1851 case MONO_TYPE_VOID:
1852 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1855 case MONO_TYPE_BOOLEAN:
1858 case MONO_TYPE_CHAR:
1861 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1865 case MONO_TYPE_FNPTR:
1866 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1867 case MONO_TYPE_CLASS:
1868 case MONO_TYPE_STRING:
1869 case MONO_TYPE_OBJECT:
1870 case MONO_TYPE_SZARRAY:
1871 case MONO_TYPE_ARRAY:
1872 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1875 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1878 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1879 case MONO_TYPE_VALUETYPE:
/* Enums call like their underlying type; re-dispatch on the base type. */
1880 if (type->data.klass->enumtype) {
1881 type = mono_class_enum_basetype (type->data.klass);
1884 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1885 case MONO_TYPE_TYPEDBYREF:
1886 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1887 case MONO_TYPE_GENERICINST:
1888 type = &type->data.generic_class->container_class->byval_arg;
1891 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1897 * target_type_is_incompatible:
1898 * @cfg: MonoCompile context
1900 * Check that the item @arg on the evaluation stack can be stored
1901 * in the target type (can be a local, or field, etc).
1902 * The cfg arg can be used to check if we need verification or just
1905 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): fragment — return statements, 'goto handle_enum'-style loop
   backs and several case labels between the visible lines are not shown. */
1908 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1910 MonoType *simple_type;
1913 if (target->byref) {
1914 /* FIXME: check that the pointed to types match */
1915 if (arg->type == STACK_MP)
1916 return arg->klass != mono_class_from_mono_type (target);
1917 if (arg->type == STACK_PTR)
1922 simple_type = mono_type_get_underlying_type (target);
1923 switch (simple_type->type) {
1924 case MONO_TYPE_VOID:
1928 case MONO_TYPE_BOOLEAN:
1931 case MONO_TYPE_CHAR:
1934 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1938 /* STACK_MP is needed when setting pinned locals */
1939 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1944 case MONO_TYPE_FNPTR:
1946 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1947 * in native int. (#688008).
1949 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1952 case MONO_TYPE_CLASS:
1953 case MONO_TYPE_STRING:
1954 case MONO_TYPE_OBJECT:
1955 case MONO_TYPE_SZARRAY:
1956 case MONO_TYPE_ARRAY:
1957 if (arg->type != STACK_OBJ)
1959 /* FIXME: check type compatibility */
1963 if (arg->type != STACK_I8)
1968 if (arg->type != STACK_R8)
1971 case MONO_TYPE_VALUETYPE:
1972 if (arg->type != STACK_VTYPE)
1974 klass = mono_class_from_mono_type (simple_type);
1975 if (klass != arg->klass)
1978 case MONO_TYPE_TYPEDBYREF:
1979 if (arg->type != STACK_VTYPE)
1981 klass = mono_class_from_mono_type (simple_type);
1982 if (klass != arg->klass)
1985 case MONO_TYPE_GENERICINST:
/* Generic instantiations: valuetype instantiations need an exact class match,
   reference instantiations just need an object on the stack. */
1986 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1987 if (arg->type != STACK_VTYPE)
1989 klass = mono_class_from_mono_type (simple_type);
1990 if (klass != arg->klass)
1994 if (arg->type != STACK_OBJ)
1996 /* FIXME: check type compatibility */
2000 case MONO_TYPE_MVAR:
2001 /* FIXME: all the arguments must be references for now,
2002 * later look inside cfg and see if the arg num is
2003 * really a reference
2005 g_assert (cfg->generic_sharing_context);
2006 if (arg->type != STACK_OBJ)
2010 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2016 * Prepare arguments for passing to a function call.
2017 * Return a non-zero value if the arguments can't be passed to the given
2019 * The type checks are not yet complete and some conversions may need
2020 * casts on 32 or 64 bit architectures.
2022 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): fragment — the 'handle_enum'-style label, return statements
   and some case labels between the visible lines are not shown. */
2025 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2027 MonoType *simple_type;
/* Implicit 'this' must be an object, managed pointer or native pointer. */
2031 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2035 for (i = 0; i < sig->param_count; ++i) {
2036 if (sig->params [i]->byref) {
2037 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2041 simple_type = sig->params [i];
2042 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2044 switch (simple_type->type) {
2045 case MONO_TYPE_VOID:
2050 case MONO_TYPE_BOOLEAN:
2053 case MONO_TYPE_CHAR:
2056 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2062 case MONO_TYPE_FNPTR:
2063 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2066 case MONO_TYPE_CLASS:
2067 case MONO_TYPE_STRING:
2068 case MONO_TYPE_OBJECT:
2069 case MONO_TYPE_SZARRAY:
2070 case MONO_TYPE_ARRAY:
2071 if (args [i]->type != STACK_OBJ)
2076 if (args [i]->type != STACK_I8)
2081 if (args [i]->type != STACK_R8)
2084 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying base type. */
2085 if (simple_type->data.klass->enumtype) {
2086 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2089 if (args [i]->type != STACK_VTYPE)
2092 case MONO_TYPE_TYPEDBYREF:
2093 if (args [i]->type != STACK_VTYPE)
2096 case MONO_TYPE_GENERICINST:
2097 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2101 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Maps an OP_*CALLVIRT opcode to its direct-call counterpart; asserts on
 *   any other opcode.
 *   NOTE(review): fragment — the switch line, most case labels and the return
 *   statements are not visible here.
 */
2109 callvirt_to_call (int opcode)
2114 case OP_VOIDCALLVIRT:
2123 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Maps an OP_*CALLVIRT opcode to the corresponding *_MEMBASE opcode used
 *   for calls through a memory slot (e.g. a vtable entry); asserts otherwise.
 *   NOTE(review): fragment — the switch line and some case labels are missing.
 */
2130 callvirt_to_call_membase (int opcode)
2134 return OP_CALL_MEMBASE;
2135 case OP_VOIDCALLVIRT:
2136 return OP_VOIDCALL_MEMBASE;
2138 return OP_FCALL_MEMBASE;
2140 return OP_LCALL_MEMBASE;
2142 return OP_VCALL_MEMBASE;
2144 g_assert_not_reached ();
2150 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Loads the IMT "method" value (IMT_ARG if supplied, otherwise call->method
 *   as a constant) into a register and attaches it to CALL.  Under LLVM it is
 *   recorded in call->imt_arg_reg; otherwise it is passed in
 *   MONO_ARCH_IMT_REG, falling back to mono_arch_emit_imt_argument ().
 */
2152 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2156 if (COMPILE_LLVM (cfg)) {
2157 method_reg = alloc_preg (cfg);
2160 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2161 } else if (cfg->compile_aot) {
2162 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2165 MONO_INST_NEW (cfg, ins, OP_PCONST);
2166 ins->inst_p0 = call->method;
2167 ins->dreg = method_reg;
2168 MONO_ADD_INS (cfg->cbb, ins);
2172 call->imt_arg_reg = method_reg;
2174 #ifdef MONO_ARCH_IMT_REG
2175 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2177 /* Need this to keep the IMT arg alive */
2178 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three sources for the method value, then pass it in the
   dedicated IMT register. */
2183 #ifdef MONO_ARCH_IMT_REG
2184 method_reg = alloc_preg (cfg);
2187 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2188 } else if (cfg->compile_aot) {
2189 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2192 MONO_INST_NEW (cfg, ins, OP_PCONST);
2193 ins->inst_p0 = call->method;
2194 ins->dreg = method_reg;
2195 MONO_ADD_INS (cfg->cbb, ins);
2198 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2200 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocates a MonoJumpInfo from the mempool MP and fills in its target data.
   NOTE(review): fragment — the assignments of IP and TYPE into the struct are
   not visible here. */
2205 static MonoJumpInfo *
2206 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2208 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2212 ji->data.target = target;
/*
 * mono_emit_call_args:
 *   Creates the MonoCallInst for a call with signature SIG and arguments ARGS,
 *   picking the opcode via CALLI/VIRTUAL/TAIL, setting up the return value
 *   (valuetype returns go through vret_var / OP_OUTARG_VTRETADDR), applying
 *   the soft-float r8->r4 argument conversion, and letting the backend emit
 *   the argument-passing code.
 */
2217 inline static MonoCallInst *
2218 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2219 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2222 #ifdef MONO_ARCH_SOFT_FLOAT
2227 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2229 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2232 call->signature = sig;
2233 call->rgctx_reg = rgctx;
2235 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* NOTE(review): fragment — the tail-call condition distinguishing the two
   identical-looking MONO_TYPE_ISSTRUCT branches is not visible here. */
2238 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2239 call->vret_var = cfg->vret_addr;
2240 //g_assert_not_reached ();
2242 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2243 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2246 temp->backend.is_pinvoke = sig->pinvoke;
2249 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2250 * address of return value to increase optimization opportunities.
2251 * Before vtype decomposition, the dreg of the call ins itself represents the
2252 * fact the call modifies the return value. After decomposition, the call will
2253 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2254 * will be transformed into an LDADDR.
2256 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2257 loada->dreg = alloc_preg (cfg);
2258 loada->inst_p0 = temp;
2259 /* We reference the call too since call->dreg could change during optimization */
2260 loada->inst_p1 = call;
2261 MONO_ADD_INS (cfg->cbb, loada);
2263 call->inst.dreg = temp->dreg;
2265 call->vret_var = loada;
2266 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2267 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2269 #ifdef MONO_ARCH_SOFT_FLOAT
2270 if (COMPILE_SOFT_FLOAT (cfg)) {
2272 * If the call has a float argument, we would need to do an r8->r4 conversion using
2273 * an icall, but that cannot be done during the call sequence since it would clobber
2274 * the call registers + the stack. So we do it before emitting the call.
2276 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2278 MonoInst *in = call->args [i];
/* Index 0 may be the implicit 'this'; map the rest onto sig->params. */
2280 if (i >= sig->hasthis)
2281 t = sig->params [i - sig->hasthis];
2283 t = &mono_defaults.int_class->byval_arg;
2284 t = mono_type_get_underlying_type (t);
2286 if (!t->byref && t->type == MONO_TYPE_R4) {
2287 MonoInst *iargs [1];
2291 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2293 /* The result will be in an int vreg */
2294 call->args [i] = conv;
2301 if (COMPILE_LLVM (cfg))
2302 mono_llvm_emit_call (cfg, call);
2304 mono_arch_emit_call (cfg, call);
2306 mono_arch_emit_call (cfg, call);
/* Track the max outgoing parameter area and note that this method makes calls. */
2309 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2310 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attaches the runtime-generic-context argument to CALL: through the
 *   dedicated MONO_ARCH_RGCTX_REG when the architecture has one, otherwise by
 *   recording the vreg in call->rgctx_arg_reg.
 */
2316 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2318 #ifdef MONO_ARCH_RGCTX_REG
2319 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2320 cfg->uses_rgctx_reg = TRUE;
2321 call->rgctx_reg = TRUE;
2323 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emits an indirect call through ADDR with signature SIG, optionally
 *   carrying an rgctx argument (copied into a fresh vreg first so it survives
 *   until the call instruction).
 */
2330 inline static MonoInst*
2331 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2337 rgctx_reg = mono_alloc_preg (cfg);
2338 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2341 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
/* The indirect-call target address goes in sreg1. */
2343 call->inst.sreg1 = addr->dreg;
2345 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2348 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2350 return (MonoInst*)call;
/* Forward declarations for the rgctx lookup helpers used below. */
2354 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2356 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Central helper for emitting a managed call to METHOD.  Handles remoting
 *   wrappers, string ctors, the delegate Invoke fast path, devirtualization
 *   of non-virtual/sealed methods, vtable and IMT (interface) dispatch, and
 *   the optional imt/rgctx arguments.
 */
2359 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2360 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2362 gboolean might_be_remote;
2363 gboolean virtual = this != NULL;
2364 gboolean enable_for_aot = TRUE;
2370 rgctx_reg = mono_alloc_preg (cfg);
2371 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2374 if (method->string_ctor) {
2375 /* Create the real signature */
2376 /* FIXME: Cache these */
2377 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2378 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2383 context_used = mono_method_check_context_used (method);
/* Transparent-proxy candidates: MarshalByRef (or object) non-virtual calls. */
2385 might_be_remote = this && sig->hasthis &&
2386 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2387 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2389 if (might_be_remote && context_used) {
2392 g_assert (cfg->generic_sharing_context);
2394 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2396 return mono_emit_calli (cfg, sig, args, addr, NULL);
2399 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2401 if (might_be_remote)
2402 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2404 call->method = method;
2405 call->inst.flags |= MONO_INST_HAS_METHOD;
2406 call->inst.inst_left = this;
2409 int vtable_reg, slot_reg, this_reg;
2411 this_reg = this->dreg;
2413 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call straight through delegate->invoke_impl. */
2414 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2415 MonoInst *dummy_use;
2417 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2419 /* Make a call to delegate->invoke_impl */
2420 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2421 call->inst.inst_basereg = this_reg;
2422 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2423 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2425 /* We must emit a dummy use here because the delegate trampoline will
2426 replace the 'this' argument with the delegate target making this activation
2427 no longer a root for the delegate.
2428 This is an issue for delegates that target collectible code such as dynamic
2429 methods of GC'able assemblies.
2431 For a test case look into #667921.
2433 FIXME: a dummy use is not the best way to do it as the local register allocator
2434 will put it on a caller save register and spil it around the call.
2435 Ideally, we would either put it on a callee save register or only do the store part.
2437 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2439 return (MonoInst*)call;
/* Devirtualization: non-virtual or virtual-but-final methods become direct calls. */
2443 if ((!cfg->compile_aot || enable_for_aot) &&
2444 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2445 (MONO_METHOD_IS_FINAL (method) &&
2446 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2447 !(method->klass->marshalbyref && context_used)) {
2449 * the method is not virtual, we just need to ensure this is not null
2450 * and then we can call the method directly.
2452 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2454 * The check above ensures method is not gshared, this is needed since
2455 * gshared methods can't have wrappers.
2457 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2460 if (!method->string_ctor)
2461 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2463 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2464 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2466 * the method is virtual, but we can statically dispatch since either
2467 * it's class or the method itself are sealed.
2468 * But first we need to ensure it's not a null reference.
2470 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2472 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2474 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
/* True virtual dispatch: load the vtable, faulting on a null 'this'. */
2476 vtable_reg = alloc_preg (cfg);
2477 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2478 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2480 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via the IMT: the slot sits at a negative offset from the vtable. */
2482 guint32 imt_slot = mono_method_get_imt_slot (method);
2483 emit_imt_argument (cfg, call, imt_arg);
2484 slot_reg = vtable_reg;
2485 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2488 if (slot_reg == -1) {
/* No IMT: resolve through the per-interface offset table instead. */
2489 slot_reg = alloc_preg (cfg);
2490 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2491 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2494 slot_reg = vtable_reg;
2495 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2496 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2497 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also pass the method through the IMT mechanism. */
2499 g_assert (mono_method_signature (method)->generic_param_count);
2500 emit_imt_argument (cfg, call, imt_arg);
2505 call->inst.sreg1 = slot_reg;
2506 call->virtual = TRUE;
2510 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2513 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2515 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper around mono_emit_method_call_full (): emit a call to
 * METHOD with ARGS and optional THIS, using the method's own signature and no
 * explicit imt/rgctx arguments (the two trailing NULLs).
 * NOTE(review): this listing is gap-sampled; intermediate lines are missing.
 */
2519 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2521 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * Builds the call instruction via mono_emit_call_args () (all four boolean
 * flags FALSE: not virtual, no tail call, etc. — presumably; confirm against
 * the mono_emit_call_args () prototype) and appends it to the current bblock.
 * Returns the call instruction as a MonoInst* (its dreg holds the result).
 */
2525 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2532 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2535 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2537 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall identified by its address FUNC.  Looks up the
 * registered icall info to obtain its signature and wrapper, then emits a
 * native call to the wrapper.  FUNC must have been registered previously
 * (mono_find_jit_icall_by_addr () result is used unconditionally here).
 */
2541 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2543 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2547 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2551 * mono_emit_abs_call:
2553 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2555 inline static MonoInst*
2556 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2557 MonoMethodSignature *sig, MonoInst **args)
2559 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2563 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* The patch info is registered in cfg->abs_patches keyed by itself so the
 * backend can later map the fake "address" back to the patch description. */
2566 if (cfg->abs_patches == NULL)
2567 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2568 g_hash_table_insert (cfg->abs_patches, ji, ji);
2569 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call so later passes know fptr is a MonoJumpInfo*, not real code. */
2570 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a call whose return type is a small integer.
 * Native (pinvoke) code — and LLVM-compiled code — might return sub-register
 * sized integers without initializing the upper bits, so an explicit
 * sign/zero-extension conversion is emitted after the call.
 * NOTE(review): the listing omits the `break;` lines of the switch and the
 * return path; the visible structure is incomplete.
 */
2575 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2577 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2578 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2582 * Native code might return non register sized integers
2583 * without initializing the upper bits.
/* Pick the widening opcode matching the load width of the return type. */
2585 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2586 case OP_LOADI1_MEMBASE:
2587 widen_op = OP_ICONV_TO_I1;
2589 case OP_LOADU1_MEMBASE:
2590 widen_op = OP_ICONV_TO_U1;
2592 case OP_LOADI2_MEMBASE:
2593 widen_op = OP_ICONV_TO_I2;
2595 case OP_LOADU2_MEMBASE:
2596 widen_op = OP_ICONV_TO_U2;
2602 if (widen_op != -1) {
2603 int dreg = alloc_preg (cfg);
2606 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original call result. */
2607 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and lazily cache) the managed string-class "memcpy" helper with
 * 3 parameters from corlib.  Aborts with g_error if the method is missing,
 * which indicates a corlib too old for this runtime.
 * NOTE(review): lookup result caching in a plain static — callers presumably
 * run under the loader lock or tolerate the benign race; confirm.
 */
2617 get_memcpy_method (void)
2619 static MonoMethod *memcpy_method = NULL;
2620 if (!memcpy_method) {
2621 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2623 g_error ("Old corlib found. Install a new one");
2625 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively build a bitmap (one bit per pointer-sized slot, relative to
 * OFFSET) marking which fields of KLASS hold managed references and thus need
 * a write barrier when copied.  Static fields are skipped; for valuetypes the
 * MonoObject header is subtracted from the field offset.  Fields that are
 * themselves reference-carrying structs recurse with an adjusted offset.
 */
2629 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2631 MonoClassField *field;
2632 gpointer iter = NULL;
2634 while ((field = mono_class_get_fields (klass, &iter))) {
2637 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2639 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2640 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the per-slot bitmap to work. */
2641 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2642 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2644 MonoClass *field_class = mono_class_from_mono_type (field->type);
2645 if (field_class->has_references)
2646 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or, if VALUE is NULL, the
 * raw register VALUE_REG) through PTR.  No-op unless cfg->gen_write_barriers.
 * Strategy, in order of preference:
 *   1. OP_CARD_TABLE_WBARRIER — when the arch supports it, not AOT, and the
 *      GC exposes a card table and nursery info;
 *   2. inline card marking — shift the address by card_table_shift_bits, mask,
 *      add the card table base, and store a 1 byte into the card;
 *   3. fall back to calling the managed write barrier method.
 * A dummy use of the value is emitted at the end, presumably to keep it alive
 * across the barrier — TODO confirm against the surrounding (elided) lines.
 */
2652 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2654 int card_table_shift_bits;
2655 gpointer card_table_mask;
2657 MonoInst *dummy_use;
2658 int nursery_shift_bits;
2659 size_t nursery_size;
2660 gboolean has_card_table_wb = FALSE;
2662 if (!cfg->gen_write_barriers)
2665 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2667 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2669 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2670 has_card_table_wb = TRUE;
2673 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2676 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2677 wbarrier->sreg1 = ptr->dreg;
2679 wbarrier->sreg2 = value->dreg;
2681 wbarrier->sreg2 = value_reg;
2682 MONO_ADD_INS (cfg->cbb, wbarrier);
2683 } else if (card_table) {
2684 int offset_reg = alloc_preg (cfg);
2685 int card_reg = alloc_preg (cfg);
/* card index = (address >> shift) [& mask] */
2688 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2689 if (card_table_mask)
2690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2692 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2693 * IMM's larger than 32bits.
2695 if (cfg->compile_aot) {
2696 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2698 MONO_INST_NEW (cfg, ins, OP_PCONST);
2699 ins->inst_p0 = card_table;
2700 ins->dreg = card_reg;
2701 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: card_table[index] = 1. */
2704 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2705 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2707 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2708 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2712 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2714 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2715 dummy_use->sreg1 = value_reg;
2716 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a write-barrier-aware copy of a valuetype of class KLASS.
 * iargs[0]/iargs[1] hold the destination/source addresses.  Bails out (the
 * elided lines presumably return FALSE — TODO confirm) when alignment is
 * below pointer size or the type is too large for the 32-slot wb bitmap.
 * Large copies (> 5 pointer words) are delegated to the
 * mono_gc_wbarrier_value_copy_bitmap icall with the computed bitmap; small
 * copies are unrolled as per-word load/store pairs, emitting a write barrier
 * for each slot flagged in the bitmap, with trailing sub-word remainders
 * copied as i4/i2/i1 (those cannot be references).
 */
2721 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2723 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2724 unsigned need_wb = 0;
2729 /*types with references can't have alignment smaller than sizeof(void*) */
2730 if (align < SIZEOF_VOID_P)
2733 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2734 if (size > 32 * SIZEOF_VOID_P)
2737 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2739 /* We don't unroll more than 5 stores to avoid code bloat. */
2740 if (size > 5 * SIZEOF_VOID_P) {
2741 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a whole number of pointer words. */
2742 size += (SIZEOF_VOID_P - 1);
2743 size &= ~(SIZEOF_VOID_P - 1);
2745 EMIT_NEW_ICONST (cfg, iargs [2], size);
2746 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2747 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2751 destreg = iargs [0]->dreg;
2752 srcreg = iargs [1]->dreg;
2755 dest_ptr_reg = alloc_preg (cfg);
2756 tmp_reg = alloc_preg (cfg);
/* Running destination cursor; advanced one word per iteration below. */
2759 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2761 while (size >= SIZEOF_VOID_P) {
2762 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references (guard elided). */
2766 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2768 offset += SIZEOF_VOID_P;
2769 size -= SIZEOF_VOID_P;
2772 /*tmp += sizeof (void*)*/
2773 if (size >= SIZEOF_VOID_P) {
2774 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2775 MONO_ADD_INS (cfg->cbb, iargs [0]);
2779 /* Those cannot be references since size < sizeof (void*) */
2781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2782 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2788 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2789 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2795 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2796 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2805 * Emit code to copy a valuetype of type @klass whose address is stored in
2806 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Sizing uses the native layout when NATIVE is set, the managed layout
 * otherwise.  When write barriers are enabled and the class holds references
 * (and the store is not to the stack frame), the copy goes through either the
 * wb-aware unrolled memcpy above, or the mono_value_copy icall with the class
 * passed via rgctx/classconst/pconst depending on sharing and AOT.  Otherwise
 * small copies are inlined and large ones call the managed memcpy helper.
 */
2809 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2811 MonoInst *iargs [4];
2814 MonoMethod *memcpy_method;
2818 * This check breaks with spilled vars... need to handle it during verification anyway.
2819 * g_assert (klass && klass == src->klass && klass == dest->klass);
2823 n = mono_class_native_size (klass, &align);
2825 n = mono_class_value_size (klass, &align);
2827 /* if native is true there should be no references in the struct */
2828 if (cfg->gen_write_barriers && klass->has_references && !native) {
2829 /* Avoid barriers when storing to the stack */
2830 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2831 (dest->opcode == OP_LDADDR))) {
2832 int context_used = 0;
2837 if (cfg->generic_sharing_context)
2838 context_used = mono_class_check_context_used (klass);
2840 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2841 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2843 } else if (context_used) {
2844 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2846 if (cfg->compile_aot) {
2847 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2849 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Ensure the GC descriptor exists before runtime code consumes the class. */
2850 mono_class_compute_gc_descriptor (klass);
2854 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2859 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2860 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2861 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2865 EMIT_NEW_ICONST (cfg, iargs [2], n);
2867 memcpy_method = get_memcpy_method ();
2868 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) the managed string-class "memset" helper with
 * 3 parameters from corlib; g_error if absent (corlib too old).
 * Mirrors get_memcpy_method ().
 */
2873 get_memset_method (void)
2875 static MonoMethod *memset_method = NULL;
2876 if (!memset_method) {
2877 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2879 g_error ("Old corlib found. Install a new one");
2881 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of class KLASS at the address in
 * DEST->dreg (CIL `initobj`).  Small types (<= 5 pointer words) are zeroed
 * with an inline memset; larger ones call the managed memset helper with
 * (dest, 0, n).  IP is unused in the visible lines.
 */
2885 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2887 MonoInst *iargs [3];
2890 MonoMethod *memset_method;
2892 /* FIXME: Optimize this for the case when dest is an LDADDR */
2894 mono_class_init (klass);
2895 n = mono_class_value_size (klass, &align);
2897 if (n <= sizeof (gpointer) * 5) {
2898 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2901 memset_method = get_memset_method ();
2903 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2904 EMIT_NEW_ICONST (cfg, iargs [2], n);
2905 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR producing the runtime generic context for METHOD inside shared
 * code.  Depending on how the context is shared:
 *   - method-inst sharing: load the MRGCTX from the vtable var;
 *   - static methods / valuetype methods: load the vtable var, and if the
 *     method is inflated with a method_inst, dereference the MRGCTX's
 *     class_vtable field;
 *   - otherwise: load `this` (arg 0) and read its vtable from the object
 *     header.
 * Only valid under generic sharing (asserted).  Several intermediate lines of
 * the original are elided from this listing.
 */
2910 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2912 MonoInst *this = NULL;
2914 g_assert (cfg->generic_sharing_context);
2916 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2917 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2918 !method->klass->valuetype)
2919 EMIT_NEW_ARGLOAD (cfg, this, 0);
2921 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2922 MonoInst *mrgctx_loc, *mrgctx_var;
2925 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2927 mrgctx_loc = mono_get_vtable_var (cfg);
2928 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2931 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2932 MonoInst *vtable_loc, *vtable_var;
2936 vtable_loc = mono_get_vtable_var (cfg);
2937 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2939 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2940 MonoInst *mrgctx_var = vtable_var;
2943 vtable_reg = alloc_preg (cfg);
/* vtable = mrgctx->class_vtable */
2944 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2945 vtable_var->type = STACK_PTR;
2953 vtable_reg = alloc_preg (cfg);
/* vtable = this->vtable (object header) */
2954 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing an rgctx
 * slot request: the requesting METHOD, whether the lookup goes through the
 * MRGCTX (IN_MRGCTX), an embedded MonoJumpInfo of PATCH_TYPE/PATCH_DATA
 * identifying the target item, and the kind of info wanted (INFO_TYPE).
 */
2959 static MonoJumpInfoRgctxEntry *
2960 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2962 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2963 res->method = method;
2964 res->in_mrgctx = in_mrgctx;
2965 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2966 res->data->type = patch_type;
2967 res->data->data.target = patch_data;
2968 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline, passing RGCTX as the sole
 * argument; the trampoline is resolved via a MONO_PATCH_INFO_RGCTX_FETCH abs
 * patch carrying ENTRY.  Returns the fetched rgctx slot value.
 */
2973 static inline MonoInst*
2974 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2976 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the RGCTX_TYPE info (e.g. KLASS/VTABLE) of KLASS from the
 * runtime generic context of the current method.  Builds an rgctx entry keyed
 * by a CLASS patch and fetches it through the lazy-fetch trampoline.
 */
2980 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2981 MonoClass *klass, MonoRgctxInfoType rgctx_type)
2983 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2984 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2986 return emit_rgctx_fetch (cfg, rgctx, entry);
2990 * emit_get_rgctx_method:
2992 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2993 * normal constants, else emit a load from the rgctx.
2996 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2997 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
2999 if (!context_used) {
/* Non-shared code: the method is known at compile time, emit a constant. */
3002 switch (rgctx_type) {
3003 case MONO_RGCTX_INFO_METHOD:
3004 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3006 case MONO_RGCTX_INFO_METHOD_RGCTX:
3007 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3010 g_assert_not_reached ();
/* Shared code: fetch the value from the rgctx via the lazy trampoline. */
3013 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3014 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3016 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the RGCTX_TYPE info of FIELD from the runtime generic
 * context of the current method; analogous to emit_get_rgctx_klass () but
 * keyed by a FIELD patch.
 */
3021 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3022 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3024 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3025 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3027 return emit_rgctx_fetch (cfg, rgctx, entry);
3031 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, obtaining the
 * vtable argument either from the rgctx (shared code) or as a vtable constant
 * (non-shared; mono_class_vtable may fail, hence the caller-side error check
 * noted above).  LLVM uses a dedicated trampoline signature.  On arches with
 * MONO_ARCH_VTABLE_REG the vtable is passed in a fixed register.
 */
3034 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3036 MonoInst *vtable_arg;
3038 int context_used = 0;
3040 if (cfg->generic_sharing_context)
3041 context_used = mono_class_check_context_used (klass);
3044 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3045 klass, MONO_RGCTX_INFO_VTABLE);
3047 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3051 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3054 if (COMPILE_LLVM (cfg))
3055 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3057 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3058 #ifdef MONO_ARCH_VTABLE_REG
3059 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3060 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *
 *   Under --debug=casts, record the source class (read from OBJ_REG's vtable)
 * and the target KLASS into the thread's MonoJitTlsData (class_cast_from /
 * class_cast_to) so a failing cast can produce a detailed message.  Prints an
 * error if the platform lacks the JIT TLS intrinsic.
 */
3067 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3069 if (mini_get_debug_options ()->better_cast_details) {
3070 int to_klass_reg = alloc_preg (cfg);
3071 int vtable_reg = alloc_preg (cfg);
3072 int klass_reg = alloc_preg (cfg);
3073 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3076 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3080 MONO_ADD_INS (cfg->cbb, tls_get);
/* from-class = obj->vtable->klass */
3081 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3084 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3085 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3086 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): after a cast that did not throw,
 * clear the recorded details.  Zeroing class_cast_from alone is sufficient
 * since it is the field consulted first.
 */
3091 reset_cast_details (MonoCompile *cfg)
3093 /* Reset the variables holding the cast details */
3094 if (mini_get_debug_options ()->better_cast_details) {
3095 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3097 MONO_ADD_INS (cfg->cbb, tls_get);
3098 /* It is enough to reset the from field */
3099 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3104 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS
 * (used for stores into arrays of MBR/covariant element types).  Compares
 * either the object's class (MONO_OPT_SHARED), the rgctx-fetched vtable
 * (shared generic code), or a vtable constant/immediate (AOT vs JIT), and
 * throws ArrayTypeMismatchException on inequality.  Cast details are saved
 * before and reset after the check.
 */
3107 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3109 int vtable_reg = alloc_preg (cfg);
3110 int context_used = 0;
3112 if (cfg->generic_sharing_context)
3113 context_used = mono_class_check_context_used (array_class);
3115 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also acts as the null check on obj. */
3117 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3119 if (cfg->opt & MONO_OPT_SHARED) {
3120 int class_reg = alloc_preg (cfg);
3121 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3122 if (cfg->compile_aot) {
3123 int klass_reg = alloc_preg (cfg);
3124 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3125 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3127 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3129 } else if (context_used) {
3130 MonoInst *vtable_ins;
3132 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3133 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3135 if (cfg->compile_aot) {
3139 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3141 vt_reg = alloc_preg (cfg);
3142 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3143 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3146 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3148 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3152 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3154 reset_cast_details (cfg);
3158 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3159 * generic code is generated.
/*
 * Calls the managed Nullable<T>.Unbox helper on VAL: via an indirect call
 * through an rgctx-fetched code address in shared code, or a direct managed
 * call otherwise.  Returns the call instruction.
 */
3162 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3164 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3167 MonoInst *rgctx, *addr;
3169 /* FIXME: What if the class is shared? We might not
3170 have to get the address of the method from the
3172 addr = emit_get_rgctx_method (cfg, context_used, method,
3173 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3175 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3177 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3179 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the `unbox` check+address computation for sp[0] against KLASS.
 * Verifies the object's vtable rank is 0 and that its element class matches
 * KLASS's element class — via an rgctx-fetched class in shared code, or
 * mini_emit_class_check () (with saved cast details) otherwise — throwing
 * InvalidCastException on mismatch.  Returns an ADD_IMM computing the address
 * of the value payload (obj + sizeof (MonoObject)) typed STACK_MP.
 */
3184 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3188 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3189 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3190 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3191 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3193 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3194 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3195 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3197 /* FIXME: generics */
3198 g_assert (klass->rank == 0);
3201 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3202 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3204 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3205 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3208 MonoInst *element_class;
3210 /* This assertion is from the unboxcast insn */
3211 g_assert (klass->rank == 0);
3213 element_class = emit_get_rgctx_klass (cfg, context_used,
3214 klass->element_class, MONO_RGCTX_INFO_KLASS);
3216 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3217 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3219 save_cast_details (cfg, klass->element_class, obj_reg);
3220 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3221 reset_cast_details (cfg);
/* Result: pointer to the boxed payload, just past the object header. */
3224 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3225 MONO_ADD_INS (cfg->cbb, add);
3226 add->type = STACK_MP;
3233 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating an object of KLASS (FOR_BOX selects the boxing
 * allocator variant).  Paths, in order:
 *   - shared generic code: fetch KLASS or its VTABLE from the rgctx and call
 *     mono_object_new / mono_object_new_specific;
 *   - MONO_OPT_SHARED: domain + classconst -> mono_object_new;
 *   - AOT out-of-line corlib types: specialized mscorlib newobj helper keyed
 *     by the type token index (avoids relocations);
 *   - default: resolve the vtable (TYPE_LOAD exception on failure), prefer a
 *     GC managed allocator when available, else the allocation function from
 *     mono_class_get_allocation_ftn (), optionally passing the instance size
 *     in pointer words ("lw") when pass_lw is set.
 */
3236 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3238 MonoInst *iargs [2];
3244 MonoInst *iargs [2];
3247 FIXME: we cannot get managed_alloc here because we can't get
3248 the class's vtable (because it's not a closed class)
3250 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3251 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3254 if (cfg->opt & MONO_OPT_SHARED)
3255 rgctx_info = MONO_RGCTX_INFO_KLASS;
3257 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3258 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3260 if (cfg->opt & MONO_OPT_SHARED) {
3261 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3263 alloc_ftn = mono_object_new;
3266 alloc_ftn = mono_object_new_specific;
3269 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3272 if (cfg->opt & MONO_OPT_SHARED) {
3273 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3274 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3276 alloc_ftn = mono_object_new;
3277 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3278 /* This happens often in argument checking code, eg. throw new FooException... */
3279 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3280 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3281 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3283 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3284 MonoMethod *managed_alloc = NULL;
/* vtable resolution failed: surface a TYPE_LOAD exception to the caller. */
3288 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3289 cfg->exception_ptr = klass;
3293 #ifndef MONO_CROSS_COMPILE
3294 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3297 if (managed_alloc) {
3298 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3299 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3301 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3303 guint32 lw = vtable->klass->instance_size;
/* Round instance size up to pointer words for the lw-taking allocator. */
3304 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3305 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3306 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3309 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3313 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3317 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL as an instance of KLASS.  Nullable<T> is boxed through
 * the managed Nullable<T>.Box helper (indirect via rgctx in shared code).
 * Otherwise allocate via handle_alloc () and store the value just past the
 * object header; the alloc result is returned (elided return line presumed).
 */
3320 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3322 MonoInst *alloc, *ins;
3324 if (mono_class_is_nullable (klass)) {
3325 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3328 /* FIXME: What if the class is shared? We might not
3329 have to get the method address from the RGCTX. */
3330 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3331 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3332 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3334 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3336 return mono_emit_method_call (cfg, method, &val, NULL);
3340 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the valuetype payload into the freshly allocated box. */
3344 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instantiation (or, under sharing, an
 * open generic) with at least one covariant/contravariant type parameter
 * instantiated with a reference type.  Such casts need the cache-based
 * variance-aware cast helpers rather than the simple inline checks.
 */
3351 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3354 MonoGenericContainer *container;
3355 MonoGenericInst *ginst;
3357 if (klass->generic_class) {
3358 container = klass->generic_class->container_class->generic_container;
3359 ginst = klass->generic_class->context.class_inst;
3360 } else if (klass->generic_container && context_used) {
3361 container = klass->generic_container;
3362 ginst = container->context.class_inst;
3367 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters can make the cast variance-sensitive. */
3369 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3371 type = ginst->type_argv [i];
3372 if (mini_type_is_reference (cfg, type))
3378 // FIXME: This doesn't work yet (class libs tests fail?)
/* is_complex_isinst: TRUE when an isinst/castclass against KLASS cannot be
 * handled by the simple inline checks and must go through an icall.  The
 * leading `TRUE ||` currently forces the icall path for every class; the
 * intended conditions (interfaces, arrays, nullables, MBR, sealed, type
 * variables) are kept for when the FIXME above is resolved. */
3379 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3382 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for CIL `castclass` of SRC to KLASS.  Paths:
 *   - variance-sensitive generic klass: call the cache-backed
 *     castclass_with_cache wrapper (cache entry fetched via rgctx);
 *   - shared code / "complex" classes: mono_object_castclass icall with the
 *     rgctx-fetched class;
 *   - simple case: null-check branch to is_null_bb, then inline interface
 *     cast, sealed-class vtable/klass comparison, or the generic
 *     mini_emit_castclass_inst () walk — throwing InvalidCastException on
 *     failure.  Cast details are saved/reset around the inline checks.
 * NOTE(review): is_complex_isinst currently always evaluates TRUE, so the
 * inline path below appears unreachable at present.
 */
3385 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3387 MonoBasicBlock *is_null_bb;
3388 int obj_reg = src->dreg;
3389 int vtable_reg = alloc_preg (cfg);
3390 MonoInst *klass_inst = NULL;
3395 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3396 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3397 MonoInst *cache_ins;
3399 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3404 /* klass - it's the second element of the cache entry*/
3405 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3408 args [2] = cache_ins;
3410 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3413 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3415 if (is_complex_isinst (klass)) {
3416 /* Complex case, handle by an icall */
3422 args [1] = klass_inst;
3424 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3426 /* Simple case, handled by the code below */
3430 NEW_BBLOCK (cfg, is_null_bb);
/* castclass succeeds trivially on null. */
3432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3433 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3435 save_cast_details (cfg, klass, obj_reg);
3437 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3438 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3439 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3441 int klass_reg = alloc_preg (cfg);
3443 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3445 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3446 /* the remoting code is broken, access the class for now */
3447 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3448 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3450 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3451 cfg->exception_ptr = klass;
3454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3456 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3457 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3459 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3461 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3462 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3466 MONO_START_BB (cfg, is_null_bb);
3468 reset_cast_details (cfg);
3474 * Returns NULL and set the cfg exception on error.
3477 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3480 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3481 int obj_reg = src->dreg;
3482 int vtable_reg = alloc_preg (cfg);
3483 int res_reg = alloc_ireg_ref (cfg);
3484 MonoInst *klass_inst = NULL;
3489 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3490 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3491 MonoInst *cache_ins;
3493 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3498 /* klass - it's the second element of the cache entry*/
3499 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3502 args [2] = cache_ins;
3504 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3507 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3509 if (is_complex_isinst (klass)) {
3510 /* Complex case, handle by an icall */
3516 args [1] = klass_inst;
3518 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3520 /* Simple case, the code below can handle it */
3524 NEW_BBLOCK (cfg, is_null_bb);
3525 NEW_BBLOCK (cfg, false_bb);
3526 NEW_BBLOCK (cfg, end_bb);
3528 /* Do the assignment at the beginning, so the other assignment can be if converted */
3529 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3530 ins->type = STACK_OBJ;
3533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3534 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3536 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3538 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3539 g_assert (!context_used);
3540 /* the is_null_bb target simply copies the input register to the output */
3541 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3543 int klass_reg = alloc_preg (cfg);
3546 int rank_reg = alloc_preg (cfg);
3547 int eclass_reg = alloc_preg (cfg);
3549 g_assert (!context_used);
3550 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3552 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3553 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3555 if (klass->cast_class == mono_defaults.object_class) {
3556 int parent_reg = alloc_preg (cfg);
3557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3558 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3559 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3561 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3562 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3563 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3564 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3565 } else if (klass->cast_class == mono_defaults.enum_class) {
3566 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3567 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3568 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3569 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3571 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3572 /* Check that the object is a vector too */
3573 int bounds_reg = alloc_preg (cfg);
3574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3576 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3579 /* the is_null_bb target simply copies the input register to the output */
3580 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3582 } else if (mono_class_is_nullable (klass)) {
3583 g_assert (!context_used);
3584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3585 /* the is_null_bb target simply copies the input register to the output */
3586 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3588 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3589 g_assert (!context_used);
3590 /* the remoting code is broken, access the class for now */
3591 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3592 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3594 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3595 cfg->exception_ptr = klass;
3598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3603 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3604 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3606 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3607 /* the is_null_bb target simply copies the input register to the output */
3608 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3613 MONO_START_BB (cfg, false_bb);
3615 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3616 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3618 MONO_START_BB (cfg, is_null_bb);
3620 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CISINST opcode (remoting-aware isinst).
 * Builds a small control-flow graph that classifies OBJ against KLASS and
 * leaves an integer result in a fresh ireg (see the encoding comment below).
 * The transparent-proxy paths exist so that proxies whose remote type is
 * not locally known report "cannot determine" (2) instead of a hard yes/no.
 * NOTE(review): the return-type line and the function tail are elided from
 * this excerpt.
 */
3626 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3628 /* This opcode takes as input an object reference and a class, and returns:
3629 0) if the object is an instance of the class,
3630 1) if the object is not instance of the class,
3631 2) if the object is a proxy whose type cannot be determined */
3634 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3635 int obj_reg = src->dreg;
3636 int dreg = alloc_ireg (cfg);
3638 int klass_reg = alloc_preg (cfg);
3640 NEW_BBLOCK (cfg, true_bb);
3641 NEW_BBLOCK (cfg, false_bb);
3642 NEW_BBLOCK (cfg, false2_bb);
3643 NEW_BBLOCK (cfg, end_bb);
3644 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is never an instance: short-circuit to the "1" result. */
3646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3649 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3650 NEW_BBLOCK (cfg, interface_fail_bb);
/* Fast interface check against the vtable's interface bitmap/IMT. */
3652 tmp_reg = alloc_preg (cfg);
3653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3654 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3655 MONO_START_BB (cfg, interface_fail_bb);
3656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still say "unknown". */
3658 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy with custom type info -> result 2 ("cannot determine"). */
3660 tmp_reg = alloc_preg (cfg);
3661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3662 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3663 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's runtime class. */
3665 tmp_reg = alloc_preg (cfg);
3666 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy: do the plain subtype test below at no_proxy_bb. */
3669 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a proxy, test against the remote class it stands in for. */
3670 tmp_reg = alloc_preg (cfg);
3671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3672 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Without custom type info the proxy behaves like an ordinary object. */
3674 tmp_reg = alloc_preg (cfg);
3675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy subtype test failure routes to "cannot determine" (2), not "no". */
3679 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3680 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3682 MONO_START_BB (cfg, no_proxy_bb);
3684 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: materialize the integer encoding and fall through to end. */
3687 MONO_START_BB (cfg, false_bb);
3689 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3690 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3692 MONO_START_BB (cfg, false2_bb);
3694 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3695 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3697 MONO_START_BB (cfg, true_bb);
3699 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3701 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST so callers get an ins whose dreg carries the result. */
3704 MONO_INST_NEW (cfg, ins, OP_ICONST);
3706 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal CCASTCLASS opcode (remoting-aware castclass).
 * Like handle_cisinst () but failure throws InvalidCastException instead of
 * producing a "false" result; only the "proxy type unknown" case survives
 * as a non-zero return value (see encoding comment below).
 * NOTE(review): the return-type line and the function tail are elided from
 * this excerpt.
 */
3712 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3714 /* This opcode takes as input an object reference and a class, and returns:
3715 0) if the object is an instance of the class,
3716 1) if the object is a proxy whose type cannot be determined
3717 an InvalidCastException exception is thrown otherwhise*/
3720 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3721 int obj_reg = src->dreg;
3722 int dreg = alloc_ireg (cfg);
3723 int tmp_reg = alloc_preg (cfg);
3724 int klass_reg = alloc_preg (cfg);
3726 NEW_BBLOCK (cfg, end_bb);
3727 NEW_BBLOCK (cfg, ok_result_bb);
/* castclass on a null reference always succeeds (result 0). */
3729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object for a descriptive InvalidCastException message. */
3732 save_cast_details (cfg, klass, obj_reg);
3734 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3735 NEW_BBLOCK (cfg, interface_fail_bb);
3737 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3738 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3739 MONO_START_BB (cfg, interface_fail_bb);
3740 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: throws unless the object is a transparent proxy. */
3742 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info -> InvalidCastException; with it -> 1. */
3744 tmp_reg = alloc_preg (cfg);
3745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3746 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3747 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3749 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3750 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3753 NEW_BBLOCK (cfg, no_proxy_bb);
3755 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3756 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3757 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy path: test against the class the proxy represents remotely. */
3759 tmp_reg = alloc_preg (cfg);
3760 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* No custom type info: fall back to the ordinary castclass check. */
3763 tmp_reg = alloc_preg (cfg);
3764 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3766 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy subtype mismatch yields result 1 ("unknown") rather than a throw. */
3768 NEW_BBLOCK (cfg, fail_1_bb);
3770 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3772 MONO_START_BB (cfg, fail_1_bb);
3774 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3775 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3777 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: this throws InvalidCastException on failure. */
3779 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3782 MONO_START_BB (cfg, ok_result_bb);
3784 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3786 MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST so callers get an ins whose dreg carries the result. */
3789 MONO_INST_NEW (cfg, ins, OP_ICONST);
3791 ins->type = STACK_I4;
3797 * Returns NULL and set the cfg exception on error.
3799 static G_GNUC_UNUSED MonoInst*
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate object of type KLASS bound to METHOD with the given
 * TARGET (the 'this' for instance delegates), inlining the work of
 * mono_delegate_ctor (): sets the target, method, method_code and
 * invoke_impl fields directly.  CONTEXT_USED is passed through to the
 * rgctx helpers for generic sharing.
 * NOTE(review): several lines of this function (the opening brace,
 * declarations, some closing braces) are elided from this excerpt.
 */
3800 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3804 gpointer *trampoline;
3805 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate object itself. */
3809 obj = handle_alloc (cfg, klass, FALSE, 0);
3813 /* Inline the contents of mono_delegate_ctor */
3815 /* Set target field */
3816 /* Optimize away setting of NULL target */
3817 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3818 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a reference into a heap object needs a write barrier. */
3819 if (cfg->gen_write_barriers) {
3820 dreg = alloc_preg (cfg);
3821 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3822 emit_write_barrier (cfg, ptr, target, 0);
3826 /* Set method field */
3827 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3829 if (cfg->gen_write_barriers) {
3830 dreg = alloc_preg (cfg);
3831 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3832 emit_write_barrier (cfg, ptr, method_ins, 0);
3835 * To avoid looking up the compiled code belonging to the target method
3836 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3837 * store it, and we fill it after the method has been compiled.
3839 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3840 MonoInst *code_slot_ins;
3843 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method->code-slot hash under the domain lock. */
3845 domain = mono_domain_get ();
3846 mono_domain_lock (domain);
3847 if (!domain_jit_info (domain)->method_code_hash)
3848 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3849 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3851 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3852 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3854 mono_domain_unlock (domain);
3856 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3861 /* Set invoke_impl field */
/* Under AOT the trampoline address is patched at load time. */
3862 if (cfg->compile_aot) {
3863 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3865 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
3866 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3868 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3870 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall for a RANK-dimensional
 * 'newarr'/'newobj' array allocation; SP holds the dimension arguments.
 * The icall uses a vararg calling convention, which LLVM cannot compile,
 * so the method falls back to the old JIT.
 * NOTE(review): declaration line elided from this excerpt.
 */
3876 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3878 MonoJitICallInfo *info;
3880 /* Need to register the icall so it gets an icall wrapper */
3881 info = mono_get_array_new_va_icall (rank);
3883 cfg->flags |= MONO_CFG_HAS_VARARGS;
3885 /* mono_array_new_va () needs a vararg calling convention */
3886 cfg->disable_llvm = TRUE;
3888 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3889 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method uses a GOT variable that has not been materialized yet,
 * prepend an OP_LOAD_GOTADDR to the entry basic block to initialize it,
 * and add a dummy use in the exit block so liveness analysis keeps the
 * variable alive for the whole method.  Idempotent: returns early once
 * got_var_allocated is set.
 */
3893 mono_emit_load_got_addr (MonoCompile *cfg)
3895 MonoInst *getaddr, *dummy_use;
3897 if (!cfg->got_var || cfg->got_var_allocated)
3900 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3901 getaddr->dreg = cfg->got_var->dreg;
3903 /* Add it to the start of the first bblock */
/* Must run before any instruction that might reference the GOT. */
3904 if (cfg->bb_entry->code) {
3905 getaddr->next = cfg->bb_entry->code;
3906 cfg->bb_entry->code = getaddr;
3909 MONO_ADD_INS (cfg->bb_entry, getaddr);
3911 cfg->got_var_allocated = TRUE;
3914 * Add a dummy use to keep the got_var alive, since real uses might
3915 * only be generated by the back ends.
3916 * Add it to end_bblock, so the variable's lifetime covers the whole
3918 * It would be better to make the usage of the got var explicit in all
3919 * cases when the backend needs it (i.e. calls, throw etc.), so this
3920 * wouldn't be needed.
3922 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3923 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized IL-size cap for inlining; overridable via the
 * MONO_INLINELIMIT environment variable (see mono_method_check_inlining). */
3926 static int inline_limit;
3927 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG.  Rejects generic sharing contexts, deep inline nesting,
 * synchronized/noinline/marshal-by-ref methods, bodies over the size limit
 * (unless AggressiveInlining is set), methods whose class still needs its
 * cctor to run, methods with declarative security, and (on soft-float
 * targets) methods passing or returning R4.
 * NOTE(review): the return-type line, several returns and closing braces
 * are elided from this excerpt.
 */
3930 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3932 MonoMethodHeaderSummary header;
3934 #ifdef MONO_ARCH_SOFT_FLOAT
3935 MonoMethodSignature *sig = mono_method_signature (method);
3939 if (cfg->generic_sharing_context)
3942 if (cfg->inline_depth > 10)
3945 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): 'signature' here does not match the 'sig' declared above
 * under MONO_ARCH_SOFT_FLOAT — this branch looks like it would not compile
 * on an arch defining MONO_ARCH_HAVE_LMF_OPS; confirm against full file. */
3946 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3947 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3948 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3953 if (!mono_method_get_header_summary (method, &header))
3956 /*runtime, icall and pinvoke are checked by summary call*/
3957 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3958 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3959 (method->klass->marshalbyref) ||
3963 /* also consider num_locals? */
3964 /* Do the size check early to avoid creating vtables */
3965 if (!inline_limit_inited) {
/* NOTE(review): getenv() is called twice; caching the result once would be
 * cheaper, but the elided lines prevent a safe rewrite here. */
3966 if (getenv ("MONO_INLINELIMIT"))
3967 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3969 inline_limit = INLINE_LENGTH_LIMIT;
3970 inline_limit_inited = TRUE;
3972 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
3976 * if we can initialize the class of the method right away, we do,
3977 * otherwise we don't allow inlining if the class needs initialization,
3978 * since it would mean inserting a call to mono_runtime_class_init()
3979 * inside the inlined code
3981 if (!(cfg->opt & MONO_OPT_SHARED)) {
3982 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3983 if (cfg->run_cctors && method->klass->has_cctor) {
3984 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3985 if (!method->klass->runtime_info)
3986 /* No vtable created yet */
3988 vtable = mono_class_vtable (cfg->domain, method->klass);
3991 /* This makes so that inline cannot trigger */
3992 /* .cctors: too many apps depend on them */
3993 /* running with a specific order... */
3994 if (! vtable->initialized)
3996 mono_runtime_class_init (vtable);
3998 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3999 if (!method->klass->runtime_info)
4000 /* No vtable created yet */
4002 vtable = mono_class_vtable (cfg->domain, method->klass);
4005 if (!vtable->initialized)
4010 * If we're compiling for shared code
4011 * the cctor will need to be run at aot method load time, for example,
4012 * or at the end of the compilation of the inlining method.
4014 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4019 * CAS - do not inline methods with declarative security
4020 * Note: this has to be before any possible return TRUE;
4022 if (mono_method_has_declsec (method))
4025 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods that use R4 anywhere. */
4027 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4029 for (i = 0; i < sig->param_count; ++i)
4030 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires a runtime
 * class-init check for VTABLE's class.  Already-initialized classes (when
 * not AOT compiling), BeforeFieldInit classes, classes with no cctor, and
 * instance methods of the class itself are all exempt.
 * NOTE(review): return-type line and several return statements are elided.
 */
4038 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4040 if (vtable->initialized && !cfg->compile_aot)
4043 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4046 if (!mono_class_needs_cctor_run (vtable->klass, method))
4049 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4050 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX in the one-dimensional
 * array ARR with element class KLASS; a bounds check is emitted when
 * BCHECK is set.  Uses an LEA on x86/amd64 for power-of-two element sizes,
 * otherwise an explicit multiply+add.
 * NOTE(review): return-type line and some returns/braces are elided.
 */
4057 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4061 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4063 mono_class_init (klass);
4064 size = mono_class_array_element_size (klass);
4066 mult_reg = alloc_preg (cfg);
4067 array_reg = arr->dreg;
4068 index_reg = index->dreg;
4070 #if SIZEOF_REGISTER == 8
4071 /* The array reg is 64 bits but the index reg is only 32 */
4072 if (COMPILE_LLVM (cfg)) {
4074 index2_reg = index_reg;
4076 index2_reg = alloc_preg (cfg);
4077 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow a 64-bit index from the eval stack if present. */
4080 if (index->type == STACK_I8) {
4081 index2_reg = alloc_preg (cfg);
4082 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4084 index2_reg = index_reg;
4089 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4091 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fold the scale into a single LEA when the element size is 1/2/4/8. */
4092 if (size == 1 || size == 2 || size == 4 || size == 8) {
4093 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4095 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4096 ins->klass = mono_class_get_element_class (klass);
4097 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof(MonoArray, vector). */
4103 add_reg = alloc_ireg_mp (cfg);
4105 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4106 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4107 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4108 ins->klass = mono_class_get_element_class (klass);
4109 ins->type = STACK_MP;
4110 MONO_ADD_INS (cfg->cbb, ins);
4115 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX1, INDEX2] in a
 * two-dimensional array of element class KLASS.  Performs per-dimension
 * lower-bound adjustment and range checks against the MonoArrayBounds
 * pairs, then addr = arr + ((idx1 * len2) + idx2) * size + vector offset.
 * Compiled only when the arch has native mul/div (needs OP_PMUL).
 * NOTE(review): return-type line and the function tail are elided.
 */
4117 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4119 int bounds_reg = alloc_preg (cfg);
4120 int add_reg = alloc_ireg_mp (cfg);
4121 int mult_reg = alloc_preg (cfg);
4122 int mult2_reg = alloc_preg (cfg);
4123 int low1_reg = alloc_preg (cfg);
4124 int low2_reg = alloc_preg (cfg);
4125 int high1_reg = alloc_preg (cfg);
4126 int high2_reg = alloc_preg (cfg);
4127 int realidx1_reg = alloc_preg (cfg);
4128 int realidx2_reg = alloc_preg (cfg);
4129 int sum_reg = alloc_preg (cfg);
4134 mono_class_init (klass);
4135 size = mono_class_array_element_size (klass);
4137 index1 = index_ins1->dreg;
4138 index2 = index_ins2->dreg;
4140 /* range checking */
4141 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4142 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; unsigned compare vs length. */
4144 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4145 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4146 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4147 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4148 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4149 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4150 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: the second MonoArrayBounds entry follows the first. */
4152 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4153 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4154 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4155 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4156 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4157 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4158 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2) + realidx2) * size + vector offset */
4160 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4161 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4162 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4163 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4164 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4166 ins->type = STACK_MP;
4168 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for the Array Address/Get/Set
 * intrinsics: rank 1 and (when the arch supports OP_LMUL) rank 2 are
 * inlined via the helpers above; higher ranks call the generated
 * array-address marshalling wrapper.  IS_SET strips the value argument
 * from the rank computation for setters.
 * NOTE(review): return-type line and some returns are elided.
 */
4175 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4179 MonoMethod *addr_method;
4182 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4185 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4187 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4188 /* emit_ldelema_2 depends on OP_LMUL */
4189 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4190 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4194 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4195 addr_method = mono_marshal_get_array_address (rank, element_size);
4196 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break instruction / Debugger.Break (). */
4201 static MonoBreakPolicy
4202 always_insert_breakpoint (MonoMethod *method)
4204 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4207 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4210 * mono_set_break_policy:
4211 * policy_callback: the new callback function
4213 * Allow embedders to decide whether to actually obey breakpoint instructions
4214 * (both break IL instructions and Debugger.Break () method calls), for example
4215 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4216 * untrusted or semi-trusted code.
4218 * @policy_callback will be called every time a break point instruction needs to
4219 * be inserted with the method argument being the method that calls Debugger.Break()
4220 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4221 * if it wants the breakpoint to not be effective in the given method.
4222 * #MONO_BREAK_POLICY_ALWAYS is the default.
4225 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
4227 if (policy_callback)
4228 break_policy_func = policy_callback;
4230 break_policy_func = always_insert_breakpoint;
/* Consult the embedder break policy for METHOD; TRUE means emit the
 * breakpoint.  (Name typo "brekpoint" is long-standing; renaming would
 * break callers elsewhere in the file.)
 * NOTE(review): return statements for the ALWAYS/NEVER cases are elided. */
4234 should_insert_brekpoint (MonoMethod *method) {
4235 switch (break_policy_func (method)) {
4236 case MONO_BREAK_POLICY_ALWAYS:
4238 case MONO_BREAK_POLICY_NEVER:
4240 case MONO_BREAK_POLICY_ON_DBG:
4241 return mono_debug_using_mono_debugger ();
4243 g_warning ("Incorrect value returned from break policy callback");
4248 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl: compute the element address and
 * move the element value between the array slot and the byref argument
 * (args [2]) using typed membase load/store.  Direction is chosen by
 * IS_SET.  Bounds checking is the caller's responsibility (see comment).
 * NOTE(review): return-type line and closing brace are elided.
 */
4250 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4252 MonoInst *addr, *store, *load;
4253 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4255 /* the bounds check is already done by the callers */
4256 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: *elem = *valueptr; get: *valueptr = *elem */
4258 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4259 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4261 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4262 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Intrinsic hook for constructor calls: currently only tries SIMD
 * constructor intrinsics when MONO_OPT_SIMD is enabled; otherwise leaves
 * ins NULL so the caller emits a normal call.
 * NOTE(review): return-type line and the function tail are elided.
 */
4268 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4270 MonoInst *ins = NULL;
4271 #ifdef MONO_ARCH_SIMD_INTRINSICS
4272 if (cfg->opt & MONO_OPT_SIMD) {
4273 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given KIND (memory-model barrier
 * flavor) to the current basic block and return the instruction.
 * NOTE(review): return-type line and the return statement are elided.
 */
4283 emit_memory_barrier (MonoCompile *cfg, int kind)
4285 MonoInst *ins = NULL;
4286 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4287 MONO_ADD_INS (cfg->cbb, ins);
4288 ins->backend.memory_barrier_kind = kind;
4294 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4296 MonoInst *ins = NULL;
4298 static MonoClass *runtime_helpers_class = NULL;
4299 if (! runtime_helpers_class)
4300 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4301 "System.Runtime.CompilerServices", "RuntimeHelpers");
4303 if (cmethod->klass == mono_defaults.string_class) {
4304 if (strcmp (cmethod->name, "get_Chars") == 0) {
4305 int dreg = alloc_ireg (cfg);
4306 int index_reg = alloc_preg (cfg);
4307 int mult_reg = alloc_preg (cfg);
4308 int add_reg = alloc_preg (cfg);
4310 #if SIZEOF_REGISTER == 8
4311 /* The array reg is 64 bits but the index reg is only 32 */
4312 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4314 index_reg = args [1]->dreg;
4316 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4318 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4319 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4320 add_reg = ins->dreg;
4321 /* Avoid a warning */
4323 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4327 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4328 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4329 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4331 type_from_op (ins, NULL, NULL);
4333 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4334 int dreg = alloc_ireg (cfg);
4335 /* Decompose later to allow more optimizations */
4336 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4337 ins->type = STACK_I4;
4338 ins->flags |= MONO_INST_FAULT;
4339 cfg->cbb->has_array_access = TRUE;
4340 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4343 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4344 int mult_reg = alloc_preg (cfg);
4345 int add_reg = alloc_preg (cfg);
4347 /* The corlib functions check for oob already. */
4348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4349 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4350 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4351 return cfg->cbb->last_ins;
4354 } else if (cmethod->klass == mono_defaults.object_class) {
4356 if (strcmp (cmethod->name, "GetType") == 0) {
4357 int dreg = alloc_ireg_ref (cfg);
4358 int vt_reg = alloc_preg (cfg);
4359 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4360 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4361 type_from_op (ins, NULL, NULL);
4364 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4365 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4366 int dreg = alloc_ireg (cfg);
4367 int t1 = alloc_ireg (cfg);
4369 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4370 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4371 ins->type = STACK_I4;
4375 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4376 MONO_INST_NEW (cfg, ins, OP_NOP);
4377 MONO_ADD_INS (cfg->cbb, ins);
4381 } else if (cmethod->klass == mono_defaults.array_class) {
4382 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4383 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4385 #ifndef MONO_BIG_ARRAYS
4387 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4390 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4391 int dreg = alloc_ireg (cfg);
4392 int bounds_reg = alloc_ireg_mp (cfg);
4393 MonoBasicBlock *end_bb, *szarray_bb;
4394 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4396 NEW_BBLOCK (cfg, end_bb);
4397 NEW_BBLOCK (cfg, szarray_bb);
4399 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4400 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4402 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4403 /* Non-szarray case */
4405 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4406 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4408 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4409 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4410 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4411 MONO_START_BB (cfg, szarray_bb);
4414 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4415 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4417 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4418 MONO_START_BB (cfg, end_bb);
4420 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4421 ins->type = STACK_I4;
4427 if (cmethod->name [0] != 'g')
4430 if (strcmp (cmethod->name, "get_Rank") == 0) {
4431 int dreg = alloc_ireg (cfg);
4432 int vtable_reg = alloc_preg (cfg);
4433 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4434 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4435 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4436 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4437 type_from_op (ins, NULL, NULL);
4440 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4441 int dreg = alloc_ireg (cfg);
4443 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4444 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4445 type_from_op (ins, NULL, NULL);
4450 } else if (cmethod->klass == runtime_helpers_class) {
4452 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4453 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4457 } else if (cmethod->klass == mono_defaults.thread_class) {
4458 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4459 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4460 MONO_ADD_INS (cfg->cbb, ins);
4462 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4463 return emit_memory_barrier (cfg, FullBarrier);
4465 } else if (cmethod->klass == mono_defaults.monitor_class) {
4467 /* FIXME this should be integrated to the check below once we support the trampoline version */
4468 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4469 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4470 MonoMethod *fast_method = NULL;
4472 /* Avoid infinite recursion */
4473 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4476 fast_method = mono_monitor_get_fast_path (cmethod);
4480 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4484 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4485 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4488 if (COMPILE_LLVM (cfg)) {
4490 * Pass the argument normally, the LLVM backend will handle the
4491 * calling convention problems.
4493 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4495 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4496 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4497 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4498 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4501 return (MonoInst*)call;
4502 } else if (strcmp (cmethod->name, "Exit") == 0) {
4505 if (COMPILE_LLVM (cfg)) {
4506 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4508 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4509 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4510 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4511 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4514 return (MonoInst*)call;
4516 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4518 MonoMethod *fast_method = NULL;
4520 /* Avoid infinite recursion */
4521 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4522 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4523 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4526 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
4527 strcmp (cmethod->name, "Exit") == 0)
4528 fast_method = mono_monitor_get_fast_path (cmethod);
4532 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4535 } else if (cmethod->klass->image == mono_defaults.corlib &&
4536 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4537 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4540 #if SIZEOF_REGISTER == 8
4541 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4542 /* 64 bit reads are already atomic */
4543 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4544 ins->dreg = mono_alloc_preg (cfg);
4545 ins->inst_basereg = args [0]->dreg;
4546 ins->inst_offset = 0;
4547 MONO_ADD_INS (cfg->cbb, ins);
4551 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4552 if (strcmp (cmethod->name, "Increment") == 0) {
4553 MonoInst *ins_iconst;
4556 if (fsig->params [0]->type == MONO_TYPE_I4)
4557 opcode = OP_ATOMIC_ADD_NEW_I4;
4558 #if SIZEOF_REGISTER == 8
4559 else if (fsig->params [0]->type == MONO_TYPE_I8)
4560 opcode = OP_ATOMIC_ADD_NEW_I8;
4563 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4564 ins_iconst->inst_c0 = 1;
4565 ins_iconst->dreg = mono_alloc_ireg (cfg);
4566 MONO_ADD_INS (cfg->cbb, ins_iconst);
4568 MONO_INST_NEW (cfg, ins, opcode);
4569 ins->dreg = mono_alloc_ireg (cfg);
4570 ins->inst_basereg = args [0]->dreg;
4571 ins->inst_offset = 0;
4572 ins->sreg2 = ins_iconst->dreg;
4573 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4574 MONO_ADD_INS (cfg->cbb, ins);
4576 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4577 MonoInst *ins_iconst;
4580 if (fsig->params [0]->type == MONO_TYPE_I4)
4581 opcode = OP_ATOMIC_ADD_NEW_I4;
4582 #if SIZEOF_REGISTER == 8
4583 else if (fsig->params [0]->type == MONO_TYPE_I8)
4584 opcode = OP_ATOMIC_ADD_NEW_I8;
4587 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4588 ins_iconst->inst_c0 = -1;
4589 ins_iconst->dreg = mono_alloc_ireg (cfg);
4590 MONO_ADD_INS (cfg->cbb, ins_iconst);
4592 MONO_INST_NEW (cfg, ins, opcode);
4593 ins->dreg = mono_alloc_ireg (cfg);
4594 ins->inst_basereg = args [0]->dreg;
4595 ins->inst_offset = 0;
4596 ins->sreg2 = ins_iconst->dreg;
4597 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4598 MONO_ADD_INS (cfg->cbb, ins);
4600 } else if (strcmp (cmethod->name, "Add") == 0) {
4603 if (fsig->params [0]->type == MONO_TYPE_I4)
4604 opcode = OP_ATOMIC_ADD_NEW_I4;
4605 #if SIZEOF_REGISTER == 8
4606 else if (fsig->params [0]->type == MONO_TYPE_I8)
4607 opcode = OP_ATOMIC_ADD_NEW_I8;
4611 MONO_INST_NEW (cfg, ins, opcode);
4612 ins->dreg = mono_alloc_ireg (cfg);
4613 ins->inst_basereg = args [0]->dreg;
4614 ins->inst_offset = 0;
4615 ins->sreg2 = args [1]->dreg;
4616 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4617 MONO_ADD_INS (cfg->cbb, ins);
4620 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4622 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4623 if (strcmp (cmethod->name, "Exchange") == 0) {
4625 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4627 if (fsig->params [0]->type == MONO_TYPE_I4)
4628 opcode = OP_ATOMIC_EXCHANGE_I4;
4629 #if SIZEOF_REGISTER == 8
4630 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4631 (fsig->params [0]->type == MONO_TYPE_I))
4632 opcode = OP_ATOMIC_EXCHANGE_I8;
4634 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4635 opcode = OP_ATOMIC_EXCHANGE_I4;
4640 MONO_INST_NEW (cfg, ins, opcode);
4641 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4642 ins->inst_basereg = args [0]->dreg;
4643 ins->inst_offset = 0;
4644 ins->sreg2 = args [1]->dreg;
4645 MONO_ADD_INS (cfg->cbb, ins);
4647 switch (fsig->params [0]->type) {
4649 ins->type = STACK_I4;
4653 ins->type = STACK_I8;
4655 case MONO_TYPE_OBJECT:
4656 ins->type = STACK_OBJ;
4659 g_assert_not_reached ();
4662 if (cfg->gen_write_barriers && is_ref)
4663 emit_write_barrier (cfg, args [0], args [1], -1);
4665 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4667 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4668 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4670 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
4671 if (fsig->params [1]->type == MONO_TYPE_I4)
4673 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4674 size = sizeof (gpointer);
4675 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4678 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4679 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4680 ins->sreg1 = args [0]->dreg;
4681 ins->sreg2 = args [1]->dreg;
4682 ins->sreg3 = args [2]->dreg;
4683 ins->type = STACK_I4;
4684 MONO_ADD_INS (cfg->cbb, ins);
4685 } else if (size == 8) {
4686 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4687 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4688 ins->sreg1 = args [0]->dreg;
4689 ins->sreg2 = args [1]->dreg;
4690 ins->sreg3 = args [2]->dreg;
4691 ins->type = STACK_I8;
4692 MONO_ADD_INS (cfg->cbb, ins);
4694 /* g_assert_not_reached (); */
4696 if (cfg->gen_write_barriers && is_ref)
4697 emit_write_barrier (cfg, args [0], args [1], -1);
4699 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4703 } else if (cmethod->klass->image == mono_defaults.corlib) {
4704 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4705 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4706 if (should_insert_brekpoint (cfg->method)) {
4707 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
4709 MONO_INST_NEW (cfg, ins, OP_NOP);
4710 MONO_ADD_INS (cfg->cbb, ins);
4714 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4715 && strcmp (cmethod->klass->name, "Environment") == 0) {
4717 EMIT_NEW_ICONST (cfg, ins, 1);
4719 EMIT_NEW_ICONST (cfg, ins, 0);
4723 } else if (cmethod->klass == mono_defaults.math_class) {
4725 * There is general branches code for Min/Max, but it does not work for
4727 * http://everything2.com/?node_id=1051618
4731 #ifdef MONO_ARCH_SIMD_INTRINSICS
4732 if (cfg->opt & MONO_OPT_SIMD) {
4733 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4739 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4743 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 * Redirect a call to METHOD to a JIT-internal replacement where profitable.
 * Visible case: String.InternalAllocateStr is replaced by a direct call to
 * the GC's managed allocator, but only when allocation profiling is off and
 * MONO_OPT_SHARED is not set (both require the original icall path).
 * NOTE(review): this view is elided; the fall-through/return paths for the
 * non-redirected cases are not visible here.
 */
4746 inline static MonoInst*
4747 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4748 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4750 if (method->klass == mono_defaults.string_class) {
4751 /* managed string allocation support */
4752 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4753 MonoInst *iargs [2];
4754 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4755 MonoMethod *managed_alloc = NULL;
4757 g_assert (vtable); /* Should not fail since it is System.String */
4758 #ifndef MONO_CROSS_COMPILE
4759 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* call the managed allocator with (vtable, length) instead of the icall */
4763 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4764 iargs [1] = args [0];
4765 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * During inlining, create an OP_LOCAL variable for each argument of SIG
 * (including the implicit 'this' when sig->hasthis) and emit a store of the
 * corresponding evaluation-stack value into it, wiring the result into
 * cfg->args [i] so the inlined body can address its arguments as variables.
 */
4772 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4774 MonoInst *store, *temp;
4777 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* for 'this' (i == 0) the type comes from the stack entry, not the signature */
4778 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4781 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4782 * would be different than the MonoInst's used to represent arguments, and
4783 * the ldelema implementation can't deal with that.
4784 * Solution: When ldelema is used on an inline argument, create a var for
4785 * it, emit ldelema on that var, and emit the saving code below in
4786 * inline_method () if needed.
4788 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4789 cfg->args [i] = temp;
4790 /* This uses cfg->args [i] which is set by the preceding line */
4791 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4792 store->cil_code = sp [0]->cil_code;
/* Debugging knobs: when enabled, inlining can be restricted by method name
 * prefixes supplied through environment variables (see the checks below). */
4797 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4798 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4800 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Returns whether CALLED_METHOD's full name begins with the prefix given in
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable. The env
 * value is read once and cached in a static; an empty/unset limit allows
 * every method (the limit [0] != '\0' guard).
 */
4802 check_inline_called_method_name_limit (MonoMethod *called_method)
4805 static char *limit = NULL;
4807 if (limit == NULL) {
4808 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4810 if (limit_string != NULL)
4811 limit = limit_string;
4813 limit = (char *) "";
4816 if (limit [0] != '\0') {
4817 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* prefix comparison: only the first strlen (limit) characters are compared */
4819 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4820 g_free (called_method_name);
4822 //return (strncmp_result <= 0);
4823 return (strncmp_result == 0);
4830 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Mirror of check_inline_called_method_name_limit () for the CALLER side:
 * returns whether CALLER_METHOD's full name begins with the prefix in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable (cached in a
 * static; empty/unset means no restriction).
 */
4832 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4835 static char *limit = NULL;
4837 if (limit == NULL) {
4838 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4839 if (limit_string != NULL) {
4840 limit = limit_string;
4842 limit = (char *) "";
4846 if (limit [0] != '\0') {
4847 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* prefix comparison, as in the 'called' variant above */
4849 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4850 g_free (caller_method_name);
4852 //return (strncmp_result <= 0);
4853 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 * Initialize the inline return variable RVAR with a zero/default value of
 * its stack type (integer 0, long 0, NULL pointer, 0.0, or a zeroed value
 * type). Used when an inlined body can exit without setting the return
 * value (e.g. it only throws), so RVAR is never read uninitialized.
 */
4861 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
/* the R8 constant is referenced by address, so it needs static storage */
4863 static double r8_0 = 0.0;
4866 switch (rvar->type) {
4868 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4871 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4876 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4879 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4880 ins->type = STACK_R8;
4881 ins->inst_p0 = (void*)&r8_0;
4882 ins->dreg = rvar->dreg;
4883 MONO_ADD_INS (cfg->cbb, ins);
/* value types: zero the whole variable */
4886 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
4889 g_assert_not_reached ();
/*
 * inline_method:
 * Attempt to inline CMETHOD at the current IL point (IP) of the method being
 * compiled. The per-method state in CFG (args, locals, bblock maps, current
 * method, generic context, ...) is saved, CMETHOD's body is compiled into
 * fresh bblocks between SBBLOCK and EBBLOCK via mono_method_to_ir (), and the
 * state is restored. On success the new bblocks are linked/merged into the
 * caller's CFG and the return value (if any) is loaded from RVAR; on failure
 * the new bblocks are discarded and any pending exception state is cleared.
 * INLINE_ALWAYS forces the inline regardless of the cost heuristic.
 * NOTE(review): this view is elided; the exact return-value convention of
 * this function is not visible here.
 */
4894 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4895 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4897 MonoInst *ins, *rvar = NULL;
4898 MonoMethodHeader *cheader;
4899 MonoBasicBlock *ebblock, *sbblock;
4901 MonoMethod *prev_inlined_method;
4902 MonoInst **prev_locals, **prev_args;
4903 MonoType **prev_arg_types;
4904 guint prev_real_offset;
4905 GHashTable *prev_cbb_hash;
4906 MonoBasicBlock **prev_cil_offset_to_bb;
4907 MonoBasicBlock *prev_cbb;
4908 unsigned char* prev_cil_start;
4909 guint32 prev_cil_offset_to_bb_len;
4910 MonoMethod *prev_current_method;
4911 MonoGenericContext *prev_generic_context;
4912 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
4914 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional name-prefix filters driven by environment variables (debug aid) */
4916 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4917 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4920 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4921 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4925 if (cfg->verbose_level > 2)
4926 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* count each method as inlineable only once */
4928 if (!cmethod->inline_info) {
4929 cfg->stat_inlineable_methods++;
4930 cmethod->inline_info = 1;
4933 /* allocate local variables */
4934 cheader = mono_method_get_header (cmethod);
4936 if (cheader == NULL || mono_loader_get_last_error ()) {
4937 MonoLoaderError *error = mono_loader_get_last_error ();
4940 mono_metadata_free_mh (cheader);
4941 if (inline_always && error)
4942 mono_cfg_set_exception (cfg, error->exception_type);
4944 mono_loader_clear_error ();
4948 /*Must verify before creating locals as it can cause the JIT to assert.*/
4949 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4950 mono_metadata_free_mh (cheader);
4954 /* allocate space to store the return value */
4955 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4956 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4959 prev_locals = cfg->locals;
4960 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4961 for (i = 0; i < cheader->num_locals; ++i)
4962 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4964 /* allocate start and end blocks */
4965 /* This is needed so if the inline is aborted, we can clean up */
4966 NEW_BBLOCK (cfg, sbblock);
4967 sbblock->real_offset = real_offset;
4969 NEW_BBLOCK (cfg, ebblock);
4970 ebblock->block_num = cfg->num_bblocks++;
4971 ebblock->real_offset = real_offset;
/* save the caller state that mono_method_to_ir () will overwrite */
4973 prev_args = cfg->args;
4974 prev_arg_types = cfg->arg_types;
4975 prev_inlined_method = cfg->inlined_method;
4976 cfg->inlined_method = cmethod;
4977 cfg->ret_var_set = FALSE;
4978 cfg->inline_depth ++;
4979 prev_real_offset = cfg->real_offset;
4980 prev_cbb_hash = cfg->cbb_hash;
4981 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4982 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4983 prev_cil_start = cfg->cil_start;
4984 prev_cbb = cfg->cbb;
4985 prev_current_method = cfg->current_method;
4986 prev_generic_context = cfg->generic_context;
4987 prev_ret_var_set = cfg->ret_var_set;
/* the call site used callvirt on an instance method */
4989 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
4992 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
4994 ret_var_set = cfg->ret_var_set;
/* restore the caller state saved above */
4996 cfg->inlined_method = prev_inlined_method;
4997 cfg->real_offset = prev_real_offset;
4998 cfg->cbb_hash = prev_cbb_hash;
4999 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5000 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5001 cfg->cil_start = prev_cil_start;
5002 cfg->locals = prev_locals;
5003 cfg->args = prev_args;
5004 cfg->arg_types = prev_arg_types;
5005 cfg->current_method = prev_current_method;
5006 cfg->generic_context = prev_generic_context;
5007 cfg->ret_var_set = prev_ret_var_set;
5008 cfg->inline_depth --;
/* accept the inline if the body was cheap enough, or if it was forced */
5010 if ((costs >= 0 && costs < 60) || inline_always) {
5011 if (cfg->verbose_level > 2)
5012 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5014 cfg->stat_inlined_methods++;
5016 /* always add some code to avoid block split failures */
5017 MONO_INST_NEW (cfg, ins, OP_NOP);
5018 MONO_ADD_INS (prev_cbb, ins);
5020 prev_cbb->next_bb = sbblock;
5021 link_bblock (cfg, prev_cbb, sbblock);
5024 * Get rid of the begin and end bblocks if possible to aid local
5027 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5029 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5030 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5032 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5033 MonoBasicBlock *prev = ebblock->in_bb [0];
5034 mono_merge_basic_blocks (cfg, prev, ebblock);
5036 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5037 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5038 cfg->cbb = prev_cbb;
5042 * It's possible that the rvar is set in some prev bblock, but not in others.
/* make sure RVAR has a defined value on every path into EBBLOCK */
5048 for (i = 0; i < ebblock->in_count; ++i) {
5049 bb = ebblock->in_bb [i];
5051 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5054 emit_init_rvar (cfg, rvar, fsig->ret);
5064 * If the inlined method contains only a throw, then the ret var is not
5065 * set, so set it to a dummy value.
5068 emit_init_rvar (cfg, rvar, fsig->ret);
5070 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5073 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* abort path: undo the speculative compile */
5076 if (cfg->verbose_level > 2)
5077 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5078 cfg->exception_type = MONO_EXCEPTION_NONE;
5079 mono_loader_clear_error ();
5081 /* This gets rid of the newly added bblocks */
5082 cfg->cbb = prev_cbb;
5084 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5089 * Some of these comments may well be out-of-date.
5090 * Design decisions: we do a single pass over the IL code (and we do bblock
5091 * splitting/merging in the few cases when it's required: a back jump to an IL
5092 * address that was not already seen as bblock starting point).
5093 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5094 * Complex operations are decomposed in simpler ones right away. We need to let the
5095 * arch-specific code peek and poke inside this process somehow (except when the
5096 * optimizations can take advantage of the full semantic info of coarse opcodes).
5097 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5098 * MonoInst->opcode initially is the IL opcode or some simplification of that
5099 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5100 * opcode with value bigger than OP_LAST.
5101 * At this point the IR can be handed over to an interpreter, a dumb code generator
5102 * or to the optimizing code generator that will translate it to SSA form.
5104 * Profiling directed optimizations.
5105 * We may compile by default with few or no optimizations and instrument the code
5106 * or the user may indicate what methods to optimize the most either in a config file
5107 * or through repeated runs where the compiler applies offline the optimizations to
5108 * each method and then decides if it was worth it.
/* Verification helpers used by mono_method_to_ir (): each check bails out
 * through UNVERIFIED (or LOAD_ERROR for type-load failures) when the IL
 * stream or the evaluation stack is found to be inconsistent. */
5111 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5112 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5113 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5114 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5115 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5116 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5117 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5118 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5120 /* offset from br.s -> br like opcodes */
5121 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 * Returns whether the IL address IP still belongs to basic block BB, i.e.
 * no other bblock has been registered as starting at that IL offset.
 */
5124 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5126 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5128 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 * Pre-scan the IL stream [START, END) and register basic-block start points
 * (via GET_BBLOCK) at every branch target and at the instruction following
 * each branch/switch. Basic blocks whose last instruction is a throw are
 * marked out_of_line so later passes can move them out of the hot path.
 */
5132 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5134 unsigned char *ip = start;
5135 unsigned char *target;
5138 MonoBasicBlock *bblock;
5139 const MonoOpcode *opcode;
5142 cli_addr = ip - start;
5143 i = mono_opcode_value ((const guint8 **)&ip, end);
5146 opcode = &mono_opcodes [i];
/* dispatch on the operand encoding to find the instruction length and,
 * for branches, the target address */
5147 switch (opcode->argument) {
5148 case MonoInlineNone:
5151 case MonoInlineString:
5152 case MonoInlineType:
5153 case MonoInlineField:
5154 case MonoInlineMethod:
5157 case MonoShortInlineR:
5164 case MonoShortInlineVar:
5165 case MonoShortInlineI:
5168 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte opcode */
5169 target = start + cli_addr + 2 + (signed char)ip [1];
5170 GET_BBLOCK (cfg, bblock, target);
5173 GET_BBLOCK (cfg, bblock, ip);
5175 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte opcode */
5176 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5177 GET_BBLOCK (cfg, bblock, target);
5180 GET_BBLOCK (cfg, bblock, ip);
5182 case MonoInlineSwitch: {
5183 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole jump table */
5186 cli_addr += 5 + 4 * n;
5187 target = start + cli_addr;
5188 GET_BBLOCK (cfg, bblock, target);
5190 for (j = 0; j < n; ++j) {
5191 target = start + cli_addr + (gint32)read32 (ip);
5192 GET_BBLOCK (cfg, bblock, target);
5202 g_assert_not_reached ();
5205 if (i == CEE_THROW) {
5206 unsigned char *bb_start = ip - 1;
5208 /* Find the start of the bblock containing the throw */
5210 while ((bb_start >= start) && !bblock) {
5211 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5215 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 * Resolve method TOKEN in the context of M. For wrapper methods the token
 * indexes the wrapper's own data table; otherwise it is resolved against
 * M's image. The result may still be an open constructed method (callers
 * that must reject those use mini_get_method () below).
 */
5224 static inline MonoMethod *
5225 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5229 if (m->wrapper_type != MONO_WRAPPER_NONE)
5230 return mono_method_get_wrapper_data (m, token);
5232 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 * Like mini_get_method_allow_open (), but when not compiling with a generic
 * sharing context it additionally rejects methods whose declaring class is
 * still an open constructed type.
 */
5237 static inline MonoMethod *
5238 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5240 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5242 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 * Resolve class TOKEN in the context of METHOD (wrapper data table for
 * wrappers, image lookup otherwise) and initialize the resulting class.
 */
5248 static inline MonoClass*
5249 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5253 if (method->wrapper_type != MONO_WRAPPER_NONE)
5254 klass = mono_method_get_wrapper_data (method, token);
5256 klass = mono_class_get_full (method->klass->image, token, context);
5258 mono_class_init (klass);
5263 * Returns TRUE if the JIT should abort inlining because "callee"
5264 * is influenced by security attributes.
/*
 * check_linkdemand:
 * Evaluate CAS link demands when CALLER invokes CALLEE. Only runs when we
 * are inlining (cfg->method != caller) and the callee carries declarative
 * security. On an ECMA link demand, code throwing a SecurityException is
 * emitted before the call; other failures record a security exception on
 * the CFG (unless a previous exception is already pending).
 */
5267 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5271 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5275 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5276 if (result == MONO_JIT_SECURITY_OK)
5279 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5280 /* Generate code to throw a SecurityException before the actual call/link */
5281 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5284 NEW_ICONST (cfg, args [0], 4);
5285 NEW_METHODCONST (cfg, args [1], caller);
5286 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5287 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5288 /* don't hide previous results */
5289 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5290 cfg->exception_data = result;
/*
 * throw_exception:
 * Return (caching in a static) the managed SecurityManager.ThrowException
 * helper used to raise exceptions from JIT-emitted security checks.
 */
5298 throw_exception (void)
5300 static MonoMethod *method = NULL;
5303 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5304 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 * Emit a call to the managed ThrowException helper (see throw_exception ())
 * passing the pre-created exception object EX as its single argument.
 */
5311 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5313 MonoMethod *thrower = throw_exception ();
5316 EMIT_NEW_PCONST (cfg, args [0], ex);
5317 mono_emit_method_call (cfg, thrower, args, NULL);
5321 * Return the original method if a wrapper is specified. We can only access
5322 * the custom attributes from the original method.
/*
 * get_original_method:
 * Non-wrappers are returned as-is; native-to-managed wrappers are treated
 * like the wrapper itself; every other wrapper is unwrapped back to the
 * method it was generated from.
 */
5325 get_original_method (MonoMethod *method)
5327 if (method->wrapper_type == MONO_WRAPPER_NONE)
5330 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5331 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5334 /* in other cases we need to find the original method */
5335 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 * CoreCLR security check for field access: if the (unwrapped) caller is not
 * allowed to access FIELD, emit code that throws the returned exception.
 */
5339 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5340 MonoBasicBlock *bblock, unsigned char *ip)
5342 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5343 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5345 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 * CoreCLR security check for calls: if the (unwrapped) caller is not allowed
 * to call CALLEE, emit code that throws the returned exception.
 */
5349 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5350 MonoBasicBlock *bblock, unsigned char *ip)
5352 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5353 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5355 emit_throw_exception (cfg, ex);
5359 * Check that the IL instructions at ip are the array initialization
5360 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 * Pattern-match the canonical "newarr; dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" sequence. On a match, return a pointer to
 * the field's raw RVA data (or, for AOT, the RVA itself wrapped in a
 * pointer) and report the element size and field token through the out
 * parameters. Element types needing a byte swap on this target are rejected.
 */
5363 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5366 * newarr[System.Int32]
5368 * ldtoken field valuetype ...
5369 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the token-table byte of the ldtoken operand */
5371 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5372 guint32 token = read32 (ip + 7);
5373 guint32 field_token = read32 (ip + 2);
5374 guint32 field_index = field_token & 0xffffff;
5376 const char *data_ptr;
5378 MonoMethod *cmethod;
5379 MonoClass *dummy_class;
5380 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5386 *out_field_token = field_token;
5388 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only the corlib RuntimeHelpers.InitializeArray qualifies */
5391 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5393 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5394 case MONO_TYPE_BOOLEAN:
5398 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5399 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5400 case MONO_TYPE_CHAR:
5410 return NULL; /* stupid ARM FP swapped format */
/* the initializer blob must not be larger than the field itself */
5420 if (size > mono_type_size (field->type, &dummy_align))
5423 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5424 if (!method->klass->image->dynamic) {
5425 field_index = read32 (ip + 2) & 0xffffff;
5426 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5427 data_ptr = mono_image_rva_map (method->klass->image, rva);
5428 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5429 /* for aot code we do the lookup on load */
5430 if (aot && data_ptr)
5431 return GUINT_TO_POINTER (rva);
5433 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) images: read the data directly from the field */
5435 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 * Record an InvalidProgramException on the CFG, with a message naming the
 * method and either disassembling the offending instruction at IP or noting
 * that the method body is empty. The header is queued for later freeing.
 */
5443 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5445 char *method_fname = mono_method_full_name (method, TRUE);
5447 MonoMethodHeader *header = mono_method_get_header (method);
5449 if (header->code_size == 0)
5450 method_code = g_strdup ("method body is empty.");
5452 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5453 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5454 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5455 g_free (method_fname);
5456 g_free (method_code);
5457 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 * Record a pre-built managed exception object on the CFG. The exception_ptr
 * slot is registered as a GC root so the object survives until rethrown.
 */
5461 set_exception_object (MonoCompile *cfg, MonoException *exception)
5463 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5464 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5465 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 * Returns whether KLASS is a reference type under the current (possibly
 * generic-shared) compilation, delegating to mini_type_is_reference ().
 */
5469 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5471 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_stloc_ir:
 * Emit a store of the top-of-stack value *SP into local N. When the value is
 * an ICONST/I8CONST that is the last instruction of the current bblock, the
 * move is elided by retargeting the constant's destination register.
 */
5475 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5478 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5479 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5480 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5481 /* Optimize reg-reg moves away */
5483 * Can't optimize other opcodes, since sp[0] might point to
5484 * the last ins of a decomposed opcode.
5486 sp [0]->dreg = (cfg)->locals [n]->dreg;
5488 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5493 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 * Peephole for the "ldloca <n>; initobj <type>" pattern: instead of taking
 * the local's address, directly zero the local (NULL store for reference
 * types, VZERO for value types), avoiding the address-taken penalty.
 * NOTE(review): this view is elided; the returned ip-advance/NULL behavior
 * is not fully visible here.
 */
5496 static inline unsigned char *
5497 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5506 local = read16 (ip + 2);
/* only fire when the following initobj is in the same bblock */
5510 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5511 gboolean skip = FALSE;
5513 /* From the INITOBJ case */
5514 token = read32 (ip + 2);
5515 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5516 CHECK_TYPELOAD (klass);
5517 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5518 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5519 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5520 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 * Walk up the parent chain of CLASS and report whether it derives from
 * (or is) System.Exception.
 */
5533 is_exception_class (MonoClass *class)
5536 if (class == mono_defaults.exception_class)
5538 class = class->parent;
5544 * is_jit_optimizer_disabled:
5546 * Determine whether M's assembly has a DebuggableAttribute with the
5547 * IsJITOptimizerDisabled flag set.
/* The result is cached per-assembly: jit_optimizer_disabled is published
 * before jit_optimizer_disabled_inited with a memory barrier in between,
 * so concurrent readers never observe the flag before the value. */
5550 is_jit_optimizer_disabled (MonoMethod *m)
5552 MonoAssembly *ass = m->klass->image->assembly;
5553 MonoCustomAttrInfo* attrs;
5554 static MonoClass *klass;
5556 gboolean val = FALSE;
5559 if (ass->jit_optimizer_disabled_inited)
5560 return ass->jit_optimizer_disabled;
5563 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* no DebuggableAttribute type at all -> optimizer stays enabled */
5566 ass->jit_optimizer_disabled = FALSE;
5567 mono_memory_barrier ();
5568 ass->jit_optimizer_disabled_inited = TRUE;
5572 attrs = mono_custom_attrs_from_assembly (ass);
5574 for (i = 0; i < attrs->num_attrs; ++i) {
5575 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5578 MonoMethodSignature *sig;
5580 if (!attr->ctor || attr->ctor->klass != klass)
5582 /* Decode the attribute. See reflection.c */
5583 len = attr->data_size;
5584 p = (const char*)attr->data;
/* custom attribute blobs start with the 0x0001 prolog */
5585 g_assert (read16 (p) == 0x0001);
5588 // FIXME: Support named parameters
5589 sig = mono_method_signature (attr->ctor);
/* only the DebuggableAttribute(bool, bool) constructor is decoded */
5590 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5592 /* Two boolean arguments */
5596 mono_custom_attrs_free (attrs);
5599 ass->jit_optimizer_disabled = val;
5600 mono_memory_barrier ();
5601 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 * Decide whether a tail call from METHOD to CMETHOD with signature FSIG can
 * be emitted. Starts from an arch-specific (or signature-equality) baseline
 * and then vetoes cases where the callee could observe the caller's dying
 * stack frame (byref/pointer/fnptr args, valuetype 'this'), pinvokes,
 * save_lmf callers, and most wrapper callees.
 */
5607 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5609 gboolean supported_tail_call;
5612 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5613 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5615 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5618 for (i = 0; i < fsig->param_count; ++i) {
5619 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5620 /* These can point to the current method's stack */
5621 supported_tail_call = FALSE;
5623 if (fsig->hasthis && cmethod->klass->valuetype)
5624 /* this might point to the current method's stack */
5625 supported_tail_call = FALSE;
5626 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5627 supported_tail_call = FALSE;
5628 if (cfg->method->save_lmf)
5629 supported_tail_call = FALSE;
5630 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5631 supported_tail_call = FALSE;
5633 /* Debugging support */
/* COUNT env var lets a developer bisect which tail call introduces a bug */
5635 if (supported_tail_call) {
5636 static int count = 0;
5638 if (getenv ("COUNT")) {
5639 if (count == atoi (getenv ("COUNT")))
5640 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5641 if (count > atoi (getenv ("COUNT")))
5642 supported_tail_call = FALSE;
5647 return supported_tail_call;
5650 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5651 * it to the thread local value based on the tls_offset field. Every other kind of access to
5652 * the field causes an assert.
/*
 * is_magic_tls_access:
 * Returns whether FIELD is the corlib ThreadLocal`1.tlsdata field that the
 * JIT special-cases as described above.
 */
5655 is_magic_tls_access (MonoClassField *field)
5657 if (strcmp (field->name, "tlsdata"))
5659 if (strcmp (field->parent->name, "ThreadLocal`1"))
5661 return field->parent->image == mono_defaults.corlib;
5664 /* emits the code needed to access a managed tls var (like ThreadStatic)
5665 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5666 * pointer for the current thread.
5667 * Returns the MonoInst* representing the address of the tls var.
5670 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5673 int static_data_reg, array_reg, dreg;
5674 int offset2_reg, idx_reg;
5675 // inlined access to the tls data
5676 // idx = (offset >> 24) - 1;
5677 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* Load thread->static_data, the per-thread array of static-data chunks. */
5678 static_data_reg = alloc_ireg (cfg);
5679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1: the chunk index lives in the offset's top byte,
 * biased by one. */
5680 idx_reg = alloc_ireg (cfg);
5681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by sizeof (gpointer) (shift by 3 on 64-bit, 2 on 32-bit)
 * and load static_data [idx]. */
5683 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5684 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
5685 array_reg = alloc_ireg (cfg);
5686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* The low 24 bits of the offset are the byte offset within the chunk. */
5687 offset2_reg = alloc_ireg (cfg);
5688 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
/* addr = chunk + (offset & 0xffffff); the elided tail presumably returns
 * this `addr' instruction — confirm against the full source. */
5689 dreg = alloc_ireg (cfg);
5690 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
/*
 * create_magic_tls_access:
 *
 *   Emit (or reuse) the IR that materializes the address of the thread-local
 * slot backing TLS_FIELD (ThreadLocal`1.tlsdata, see is_magic_tls_access).
 * The address is computed once per method and cached in *CACHED_TLS_ADDR;
 * THREAD_LOCAL is the ThreadLocal<T> instance whose tls_offset field selects
 * the slot.  Returns the address via the elided tail — presumably
 * `return addr;', confirm against the full source.
 */
5695 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
5696 * this address is cached per-method in cached_tls_addr.
5699 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
5701 MonoInst *load, *addr, *temp, *store, *thread_ins;
5702 MonoClassField *offset_field;
/* Fast path: the address was already computed for this method, reload it
 * from the cached temporary. */
5704 if (*cached_tls_addr) {
5705 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Obtain the current MonoInternalThread, preferring the backend intrinsic,
 * and read ThreadLocal`1.tls_offset from the ThreadLocal instance. */
5708 thread_ins = mono_get_thread_intrinsic (cfg);
5709 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
5711 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
/* NOTE(review): this add and the icall below look like the two arms of an
 * `if (thread_ins) ... else ...' whose braces are on elided lines — when no
 * intrinsic is available, fall back to calling
 * Thread.CurrentInternalThread_internal (). Confirm against the full source. */
5713 MONO_ADD_INS (cfg->cbb, thread_ins);
5715 MonoMethod *thread_method;
5716 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
5717 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the slot address from the thread pointer and the loaded offset,
 * and give it the managed-pointer type of the tlsdata field. */
5719 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
5720 addr->klass = mono_class_from_mono_type (tls_field->type);
5721 addr->type = STACK_MP;
/* Cache the address in a new local so later accesses in this method can
 * reload it instead of recomputing. */
5722 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
5723 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
5725 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
5730 * mono_method_to_ir:
5732 * Translate the .net IL into linear IR.
5735 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5736 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5737 guint inline_offset, gboolean is_virtual_call)
5740 MonoInst *ins, **sp, **stack_start;
5741 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5742 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5743 MonoMethod *cmethod, *method_definition;
5744 MonoInst **arg_array;
5745 MonoMethodHeader *header;
5747 guint32 token, ins_flag;
5749 MonoClass *constrained_call = NULL;
5750 unsigned char *ip, *end, *target, *err_pos;
5751 static double r8_0 = 0.0;
5752 MonoMethodSignature *sig;
5753 MonoGenericContext *generic_context = NULL;
5754 MonoGenericContainer *generic_container = NULL;
5755 MonoType **param_types;
5756 int i, n, start_new_bblock, dreg;
5757 int num_calls = 0, inline_costs = 0;
5758 int breakpoint_id = 0;
5760 MonoBoolean security, pinvoke;
5761 MonoSecurityManager* secman = NULL;
5762 MonoDeclSecurityActions actions;
5763 GSList *class_inits = NULL;
5764 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5766 gboolean init_locals, seq_points, skip_dead_blocks;
5767 gboolean disable_inline;
5768 MonoInst *cached_tls_addr = NULL;
5770 disable_inline = is_jit_optimizer_disabled (method);
5772 /* serialization and xdomain stuff may need access to private fields and methods */
5773 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5774 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5775 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5776 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5777 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5778 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5780 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5782 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5783 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5784 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5785 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5786 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5788 image = method->klass->image;
5789 header = mono_method_get_header (method);
5791 MonoLoaderError *error;
5793 if ((error = mono_loader_get_last_error ())) {
5794 mono_cfg_set_exception (cfg, error->exception_type);
5796 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5797 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5799 goto exception_exit;
5801 generic_container = mono_method_get_generic_container (method);
5802 sig = mono_method_signature (method);
5803 num_args = sig->hasthis + sig->param_count;
5804 ip = (unsigned char*)header->code;
5805 cfg->cil_start = ip;
5806 end = ip + header->code_size;
5807 cfg->stat_cil_code_size += header->code_size;
5808 init_locals = header->init_locals;
5810 seq_points = cfg->gen_seq_points && cfg->method == method;
5813 * Methods without init_locals set could cause asserts in various passes
5818 method_definition = method;
5819 while (method_definition->is_inflated) {
5820 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5821 method_definition = imethod->declaring;
5824 /* SkipVerification is not allowed if core-clr is enabled */
5825 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5827 dont_verify_stloc = TRUE;
5830 if (mono_debug_using_mono_debugger ())
5831 cfg->keep_cil_nops = TRUE;
5833 if (sig->is_inflated)
5834 generic_context = mono_method_get_context (method);
5835 else if (generic_container)
5836 generic_context = &generic_container->context;
5837 cfg->generic_context = generic_context;
5839 if (!cfg->generic_sharing_context)
5840 g_assert (!sig->has_type_parameters);
5842 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5843 g_assert (method->is_inflated);
5844 g_assert (mono_method_get_context (method)->method_inst);
5846 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5847 g_assert (sig->generic_param_count);
5849 if (cfg->method == method) {
5850 cfg->real_offset = 0;
5852 cfg->real_offset = inline_offset;
5855 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5856 cfg->cil_offset_to_bb_len = header->code_size;
5858 cfg->current_method = method;
5860 if (cfg->verbose_level > 2)
5861 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5863 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5865 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5866 for (n = 0; n < sig->param_count; ++n)
5867 param_types [n + sig->hasthis] = sig->params [n];
5868 cfg->arg_types = param_types;
5870 dont_inline = g_list_prepend (dont_inline, method);
5871 if (cfg->method == method) {
5873 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5874 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5877 NEW_BBLOCK (cfg, start_bblock);
5878 cfg->bb_entry = start_bblock;
5879 start_bblock->cil_code = NULL;
5880 start_bblock->cil_length = 0;
5881 #if defined(__native_client_codegen__)
5882 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5883 ins->dreg = alloc_dreg (cfg, STACK_I4);
5884 MONO_ADD_INS (start_bblock, ins);
5888 NEW_BBLOCK (cfg, end_bblock);
5889 cfg->bb_exit = end_bblock;
5890 end_bblock->cil_code = NULL;
5891 end_bblock->cil_length = 0;
5892 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5893 g_assert (cfg->num_bblocks == 2);
5895 arg_array = cfg->args;
5897 if (header->num_clauses) {
5898 cfg->spvars = g_hash_table_new (NULL, NULL);
5899 cfg->exvars = g_hash_table_new (NULL, NULL);
5901 /* handle exception clauses */
5902 for (i = 0; i < header->num_clauses; ++i) {
5903 MonoBasicBlock *try_bb;
5904 MonoExceptionClause *clause = &header->clauses [i];
5905 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5906 try_bb->real_offset = clause->try_offset;
5907 try_bb->try_start = TRUE;
5908 try_bb->region = ((i + 1) << 8) | clause->flags;
5909 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5910 tblock->real_offset = clause->handler_offset;
5911 tblock->flags |= BB_EXCEPTION_HANDLER;
5913 link_bblock (cfg, try_bb, tblock);
5915 if (*(ip + clause->handler_offset) == CEE_POP)
5916 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5918 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5919 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5920 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5921 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5922 MONO_ADD_INS (tblock, ins);
5924 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
5925 /* finally clauses already have a seq point */
5926 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5927 MONO_ADD_INS (tblock, ins);
5930 /* todo: is a fault block unsafe to optimize? */
5931 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5932 tblock->flags |= BB_EXCEPTION_UNSAFE;
5936 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5938 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5940 /* catch and filter blocks get the exception object on the stack */
5941 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5942 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5943 MonoInst *dummy_use;
5945 /* mostly like handle_stack_args (), but just sets the input args */
5946 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5947 tblock->in_scount = 1;
5948 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5949 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5952 * Add a dummy use for the exvar so its liveness info will be
5956 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5958 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5959 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5960 tblock->flags |= BB_EXCEPTION_HANDLER;
5961 tblock->real_offset = clause->data.filter_offset;
5962 tblock->in_scount = 1;
5963 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5964 /* The filter block shares the exvar with the handler block */
5965 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5966 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5967 MONO_ADD_INS (tblock, ins);
5971 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5972 clause->data.catch_class &&
5973 cfg->generic_sharing_context &&
5974 mono_class_check_context_used (clause->data.catch_class)) {
5976 * In shared generic code with catch
5977 * clauses containing type variables
5978 * the exception handling code has to
5979 * be able to get to the rgctx.
5980 * Therefore we have to make sure that
5981 * the vtable/mrgctx argument (for
5982 * static or generic methods) or the
5983 * "this" argument (for non-static
5984 * methods) are live.
5986 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5987 mini_method_get_context (method)->method_inst ||
5988 method->klass->valuetype) {
5989 mono_get_vtable_var (cfg);
5991 MonoInst *dummy_use;
5993 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5998 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5999 cfg->cbb = start_bblock;
6000 cfg->args = arg_array;
6001 mono_save_args (cfg, sig, inline_args);
6004 /* FIRST CODE BLOCK */
6005 NEW_BBLOCK (cfg, bblock);
6006 bblock->cil_code = ip;
6010 ADD_BBLOCK (cfg, bblock);
6012 if (cfg->method == method) {
6013 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6014 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6015 MONO_INST_NEW (cfg, ins, OP_BREAK);
6016 MONO_ADD_INS (bblock, ins);
6020 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6021 secman = mono_security_manager_get_methods ();
6023 security = (secman && mono_method_has_declsec (method));
6024 /* at this point having security doesn't mean we have any code to generate */
6025 if (security && (cfg->method == method)) {
6026 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6027 * And we do not want to enter the next section (with allocation) if we
6028 * have nothing to generate */
6029 security = mono_declsec_get_demands (method, &actions);
6032 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6033 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6035 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6036 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6037 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6039 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6040 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6044 mono_custom_attrs_free (custom);
6047 custom = mono_custom_attrs_from_class (wrapped->klass);
6048 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6052 mono_custom_attrs_free (custom);
6055 /* not a P/Invoke after all */
6060 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6061 /* we use a separate basic block for the initialization code */
6062 NEW_BBLOCK (cfg, init_localsbb);
6063 cfg->bb_init = init_localsbb;
6064 init_localsbb->real_offset = cfg->real_offset;
6065 start_bblock->next_bb = init_localsbb;
6066 init_localsbb->next_bb = bblock;
6067 link_bblock (cfg, start_bblock, init_localsbb);
6068 link_bblock (cfg, init_localsbb, bblock);
6070 cfg->cbb = init_localsbb;
6072 start_bblock->next_bb = bblock;
6073 link_bblock (cfg, start_bblock, bblock);
6076 /* at this point we know, if security is TRUE, that some code needs to be generated */
6077 if (security && (cfg->method == method)) {
6080 cfg->stat_cas_demand_generation++;
6082 if (actions.demand.blob) {
6083 /* Add code for SecurityAction.Demand */
6084 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6085 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6086 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6087 mono_emit_method_call (cfg, secman->demand, args, NULL);
6089 if (actions.noncasdemand.blob) {
6090 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6091 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6092 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6093 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6094 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6095 mono_emit_method_call (cfg, secman->demand, args, NULL);
6097 if (actions.demandchoice.blob) {
6098 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6099 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6100 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6101 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6102 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6106 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6108 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6111 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6112 /* check if this is native code, e.g. an icall or a p/invoke */
6113 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6114 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6116 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6117 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6119 /* if this ia a native call then it can only be JITted from platform code */
6120 if ((icall || pinvk) && method->klass && method->klass->image) {
6121 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6122 MonoException *ex = icall ? mono_get_exception_security () :
6123 mono_get_exception_method_access ();
6124 emit_throw_exception (cfg, ex);
6131 if (header->code_size == 0)
6134 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6139 if (cfg->method == method)
6140 mono_debug_init_method (cfg, bblock, breakpoint_id);
6142 for (n = 0; n < header->num_locals; ++n) {
6143 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6148 /* We force the vtable variable here for all shared methods
6149 for the possibility that they might show up in a stack
6150 trace where their exact instantiation is needed. */
6151 if (cfg->generic_sharing_context && method == cfg->method) {
6152 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6153 mini_method_get_context (method)->method_inst ||
6154 method->klass->valuetype) {
6155 mono_get_vtable_var (cfg);
6157 /* FIXME: Is there a better way to do this?
6158 We need the variable live for the duration
6159 of the whole method. */
6160 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6164 /* add a check for this != NULL to inlined methods */
6165 if (is_virtual_call) {
6168 NEW_ARGLOAD (cfg, arg_ins, 0);
6169 MONO_ADD_INS (cfg->cbb, arg_ins);
6170 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6173 skip_dead_blocks = !dont_verify;
6174 if (skip_dead_blocks) {
6175 original_bb = bb = mono_basic_block_split (method, &error);
6176 if (!mono_error_ok (&error)) {
6177 mono_error_cleanup (&error);
6183 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6184 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6187 start_new_bblock = 0;
6190 if (cfg->method == method)
6191 cfg->real_offset = ip - header->code;
6193 cfg->real_offset = inline_offset;
6198 if (start_new_bblock) {
6199 bblock->cil_length = ip - bblock->cil_code;
6200 if (start_new_bblock == 2) {
6201 g_assert (ip == tblock->cil_code);
6203 GET_BBLOCK (cfg, tblock, ip);
6205 bblock->next_bb = tblock;
6208 start_new_bblock = 0;
6209 for (i = 0; i < bblock->in_scount; ++i) {
6210 if (cfg->verbose_level > 3)
6211 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6212 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6216 g_slist_free (class_inits);
6219 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6220 link_bblock (cfg, bblock, tblock);
6221 if (sp != stack_start) {
6222 handle_stack_args (cfg, stack_start, sp - stack_start);
6224 CHECK_UNVERIFIABLE (cfg);
6226 bblock->next_bb = tblock;
6229 for (i = 0; i < bblock->in_scount; ++i) {
6230 if (cfg->verbose_level > 3)
6231 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6232 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6235 g_slist_free (class_inits);
6240 if (skip_dead_blocks) {
6241 int ip_offset = ip - header->code;
6243 if (ip_offset == bb->end)
6247 int op_size = mono_opcode_size (ip, end);
6248 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6250 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6252 if (ip_offset + op_size == bb->end) {
6253 MONO_INST_NEW (cfg, ins, OP_NOP);
6254 MONO_ADD_INS (bblock, ins);
6255 start_new_bblock = 1;
6263 * Sequence points are points where the debugger can place a breakpoint.
6264 * Currently, we generate these automatically at points where the IL
6267 if (seq_points && sp == stack_start) {
6269 * Make methods interruptable at the beginning, and at the targets of
6270 * backward branches.
6271 * Also, do this at the start of every bblock in methods with clauses too,
6272 * to be able to handle instructions with inprecise control flow like
6274 * Backward branches are handled at the end of method-to-ir ().
6276 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6278 /* Avoid sequence points on empty IL like .volatile */
6279 // FIXME: Enable this
6280 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6281 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6282 MONO_ADD_INS (cfg->cbb, ins);
6285 bblock->real_offset = cfg->real_offset;
6287 if ((cfg->method == method) && cfg->coverage_info) {
6288 guint32 cil_offset = ip - header->code;
6289 cfg->coverage_info->data [cil_offset].cil_code = ip;
6291 /* TODO: Use an increment here */
6292 #if defined(TARGET_X86)
6293 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6294 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6296 MONO_ADD_INS (cfg->cbb, ins);
6298 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6299 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6303 if (cfg->verbose_level > 3)
6304 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6308 if (seq_points && sp != stack_start) {
6310 * The C# compiler uses these nops to notify the JIT that it should
6311 * insert seq points.
6313 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6314 MONO_ADD_INS (cfg->cbb, ins);
6316 if (cfg->keep_cil_nops)
6317 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6319 MONO_INST_NEW (cfg, ins, OP_NOP);
6321 MONO_ADD_INS (bblock, ins);
6324 if (should_insert_brekpoint (cfg->method)) {
6325 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6327 MONO_INST_NEW (cfg, ins, OP_NOP);
6330 MONO_ADD_INS (bblock, ins);
6336 CHECK_STACK_OVF (1);
6337 n = (*ip)-CEE_LDARG_0;
6339 EMIT_NEW_ARGLOAD (cfg, ins, n);
6347 CHECK_STACK_OVF (1);
6348 n = (*ip)-CEE_LDLOC_0;
6350 EMIT_NEW_LOCLOAD (cfg, ins, n);
6359 n = (*ip)-CEE_STLOC_0;
6362 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6364 emit_stloc_ir (cfg, sp, header, n);
6371 CHECK_STACK_OVF (1);
6374 EMIT_NEW_ARGLOAD (cfg, ins, n);
6380 CHECK_STACK_OVF (1);
6383 NEW_ARGLOADA (cfg, ins, n);
6384 MONO_ADD_INS (cfg->cbb, ins);
6394 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6396 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6401 CHECK_STACK_OVF (1);
6404 EMIT_NEW_LOCLOAD (cfg, ins, n);
6408 case CEE_LDLOCA_S: {
6409 unsigned char *tmp_ip;
6411 CHECK_STACK_OVF (1);
6412 CHECK_LOCAL (ip [1]);
6414 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6420 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6429 CHECK_LOCAL (ip [1]);
6430 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6432 emit_stloc_ir (cfg, sp, header, ip [1]);
6437 CHECK_STACK_OVF (1);
6438 EMIT_NEW_PCONST (cfg, ins, NULL);
6439 ins->type = STACK_OBJ;
6444 CHECK_STACK_OVF (1);
6445 EMIT_NEW_ICONST (cfg, ins, -1);
6458 CHECK_STACK_OVF (1);
6459 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6465 CHECK_STACK_OVF (1);
6467 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6473 CHECK_STACK_OVF (1);
6474 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6480 CHECK_STACK_OVF (1);
6481 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6482 ins->type = STACK_I8;
6483 ins->dreg = alloc_dreg (cfg, STACK_I8);
6485 ins->inst_l = (gint64)read64 (ip);
6486 MONO_ADD_INS (bblock, ins);
6492 gboolean use_aotconst = FALSE;
6494 #ifdef TARGET_POWERPC
6495 /* FIXME: Clean this up */
6496 if (cfg->compile_aot)
6497 use_aotconst = TRUE;
6500 /* FIXME: we should really allocate this only late in the compilation process */
6501 f = mono_domain_alloc (cfg->domain, sizeof (float));
6503 CHECK_STACK_OVF (1);
6509 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6511 dreg = alloc_freg (cfg);
6512 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6513 ins->type = STACK_R8;
6515 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6516 ins->type = STACK_R8;
6517 ins->dreg = alloc_dreg (cfg, STACK_R8);
6519 MONO_ADD_INS (bblock, ins);
6529 gboolean use_aotconst = FALSE;
6531 #ifdef TARGET_POWERPC
6532 /* FIXME: Clean this up */
6533 if (cfg->compile_aot)
6534 use_aotconst = TRUE;
6537 /* FIXME: we should really allocate this only late in the compilation process */
6538 d = mono_domain_alloc (cfg->domain, sizeof (double));
6540 CHECK_STACK_OVF (1);
6546 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6548 dreg = alloc_freg (cfg);
6549 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6550 ins->type = STACK_R8;
6552 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6553 ins->type = STACK_R8;
6554 ins->dreg = alloc_dreg (cfg, STACK_R8);
6556 MONO_ADD_INS (bblock, ins);
6565 MonoInst *temp, *store;
6567 CHECK_STACK_OVF (1);
6571 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6572 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6574 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6577 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6590 if (sp [0]->type == STACK_R8)
6591 /* we need to pop the value from the x86 FP stack */
6592 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6601 if (stack_start != sp)
6603 token = read32 (ip + 1);
6604 /* FIXME: check the signature matches */
6605 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6607 if (!cmethod || mono_loader_get_last_error ())
6610 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6611 GENERIC_SHARING_FAILURE (CEE_JMP);
6613 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6614 CHECK_CFG_EXCEPTION;
6616 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6618 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6621 /* Handle tail calls similarly to calls */
6622 n = fsig->param_count + fsig->hasthis;
6624 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6625 call->method = cmethod;
6626 call->tail_call = TRUE;
6627 call->signature = mono_method_signature (cmethod);
6628 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6629 call->inst.inst_p0 = cmethod;
6630 for (i = 0; i < n; ++i)
6631 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6633 mono_arch_emit_call (cfg, call);
6634 MONO_ADD_INS (bblock, (MonoInst*)call);
6637 for (i = 0; i < num_args; ++i)
6638 /* Prevent arguments from being optimized away */
6639 arg_array [i]->flags |= MONO_INST_VOLATILE;
6641 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6642 ins = (MonoInst*)call;
6643 ins->inst_p0 = cmethod;
6644 MONO_ADD_INS (bblock, ins);
6648 start_new_bblock = 1;
6653 case CEE_CALLVIRT: {
6654 MonoInst *addr = NULL;
6655 MonoMethodSignature *fsig = NULL;
6657 int virtual = *ip == CEE_CALLVIRT;
6658 int calli = *ip == CEE_CALLI;
6659 gboolean pass_imt_from_rgctx = FALSE;
6660 MonoInst *imt_arg = NULL;
6661 gboolean pass_vtable = FALSE;
6662 gboolean pass_mrgctx = FALSE;
6663 MonoInst *vtable_arg = NULL;
6664 gboolean check_this = FALSE;
6665 gboolean supported_tail_call = FALSE;
6668 token = read32 (ip + 1);
6675 if (method->wrapper_type != MONO_WRAPPER_NONE)
6676 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6678 fsig = mono_metadata_parse_signature (image, token);
6680 n = fsig->param_count + fsig->hasthis;
6682 if (method->dynamic && fsig->pinvoke) {
6686 * This is a call through a function pointer using a pinvoke
6687 * signature. Have to create a wrapper and call that instead.
6688 * FIXME: This is very slow, need to create a wrapper at JIT time
6689 * instead based on the signature.
6691 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6692 EMIT_NEW_PCONST (cfg, args [1], fsig);
6694 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6697 MonoMethod *cil_method;
6699 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6700 if (constrained_call && cfg->verbose_level > 2)
6701 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6702 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6703 cil_method = cmethod;
6704 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
6705 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
6706 cfg->generic_sharing_context)) {
6707 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
6709 } else if (constrained_call) {
6710 if (cfg->verbose_level > 2)
6711 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6713 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6715 * This is needed since get_method_constrained can't find
6716 * the method in klass representing a type var.
6717 * The type var is guaranteed to be a reference type in this
6720 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6721 cil_method = cmethod;
6722 g_assert (!cmethod->klass->valuetype);
6724 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6727 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6728 cil_method = cmethod;
6731 if (!cmethod || mono_loader_get_last_error ())
6733 if (!dont_verify && !cfg->skip_visibility) {
6734 MonoMethod *target_method = cil_method;
6735 if (method->is_inflated) {
6736 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6738 if (!mono_method_can_access_method (method_definition, target_method) &&
6739 !mono_method_can_access_method (method, cil_method))
6740 METHOD_ACCESS_FAILURE;
6743 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6744 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6746 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6747 /* MS.NET seems to silently convert this to a callvirt */
6752 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6753 * converts to a callvirt.
6755 * tests/bug-515884.il is an example of this behavior
6757 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6758 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6759 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6763 if (!cmethod->klass->inited)
6764 if (!mono_class_init (cmethod->klass))
6767 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6768 mini_class_is_system_array (cmethod->klass)) {
6769 array_rank = cmethod->klass->rank;
6770 fsig = mono_method_signature (cmethod);
6772 fsig = mono_method_signature (cmethod);
6777 if (fsig->pinvoke) {
6778 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6779 check_for_pending_exc, FALSE);
6780 fsig = mono_method_signature (wrapper);
6781 } else if (constrained_call) {
6782 fsig = mono_method_signature (cmethod);
6784 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6788 mono_save_token_info (cfg, image, token, cil_method);
6790 n = fsig->param_count + fsig->hasthis;
6792 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6793 if (check_linkdemand (cfg, method, cmethod))
6795 CHECK_CFG_EXCEPTION;
6798 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6799 g_assert_not_reached ();
6802 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6805 if (!cfg->generic_sharing_context && cmethod)
6806 g_assert (!mono_method_check_context_used (cmethod));
6810 //g_assert (!virtual || fsig->hasthis);
6814 if (constrained_call) {
6816 * We have the `constrained.' prefix opcode.
6818 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6820 * The type parameter is instantiated as a valuetype,
6821 * but that type doesn't override the method we're
6822 * calling, so we need to box `this'.
6824 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6825 ins->klass = constrained_call;
6826 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6827 CHECK_CFG_EXCEPTION;
6828 } else if (!constrained_call->valuetype) {
6829 int dreg = alloc_ireg_ref (cfg);
6832 * The type parameter is instantiated as a reference
6833 * type. We have a managed pointer on the stack, so
6834 * we need to dereference it here.
6836 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6837 ins->type = STACK_OBJ;
6839 } else if (cmethod->klass->valuetype)
6841 constrained_call = NULL;
6844 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6848 * If the callee is a shared method, then its static cctor
6849 * might not get called after the call was patched.
6851 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6852 emit_generic_class_init (cfg, cmethod->klass);
6853 CHECK_TYPELOAD (cmethod->klass);
6856 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6857 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6858 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6859 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6860 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6863 * Pass vtable iff target method might
6864 * be shared, which means that sharing
6865 * is enabled for its class and its
6866 * context is sharable (and it's not a
6869 if (sharing_enabled && context_sharable &&
6870 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6874 if (cmethod && mini_method_get_context (cmethod) &&
6875 mini_method_get_context (cmethod)->method_inst) {
6876 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6877 MonoGenericContext *context = mini_method_get_context (cmethod);
6878 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6880 g_assert (!pass_vtable);
6882 if (sharing_enabled && context_sharable)
6886 if (cfg->generic_sharing_context && cmethod) {
6887 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6889 context_used = mono_method_check_context_used (cmethod);
6891 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6892 /* Generic method interface
6893 calls are resolved via a
6894 helper function and don't
6896 if (!cmethod_context || !cmethod_context->method_inst)
6897 pass_imt_from_rgctx = TRUE;
6901 * If a shared method calls another
6902 * shared method then the caller must
6903 * have a generic sharing context
6904 * because the magic trampoline
6905 * requires it. FIXME: We shouldn't
6906 * have to force the vtable/mrgctx
6907 * variable here. Instead there
6908 * should be a flag in the cfg to
6909 * request a generic sharing context.
6912 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6913 mono_get_vtable_var (cfg);
6918 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6920 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6922 CHECK_TYPELOAD (cmethod->klass);
6923 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6928 g_assert (!vtable_arg);
6930 if (!cfg->compile_aot) {
6932 * emit_get_rgctx_method () calls mono_class_vtable () so check
6933 * for type load errors before.
6935 mono_class_setup_vtable (cmethod->klass);
6936 CHECK_TYPELOAD (cmethod->klass);
6939 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6941 /* !marshalbyref is needed to properly handle generic methods + remoting */
6942 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6943 MONO_METHOD_IS_FINAL (cmethod)) &&
6944 !cmethod->klass->marshalbyref) {
6951 if (pass_imt_from_rgctx) {
6952 g_assert (!pass_vtable);
6955 imt_arg = emit_get_rgctx_method (cfg, context_used,
6956 cmethod, MONO_RGCTX_INFO_METHOD);
6960 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6962 /* Calling virtual generic methods */
6963 if (cmethod && virtual &&
6964 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6965 !(MONO_METHOD_IS_FINAL (cmethod) &&
6966 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6967 mono_method_signature (cmethod)->generic_param_count) {
6968 MonoInst *this_temp, *this_arg_temp, *store;
6969 MonoInst *iargs [4];
6971 g_assert (mono_method_signature (cmethod)->is_inflated);
6973 /* Prevent inlining of methods that contain indirect calls */
6976 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6977 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6978 g_assert (!imt_arg);
6980 g_assert (cmethod->is_inflated);
6981 imt_arg = emit_get_rgctx_method (cfg, context_used,
6982 cmethod, MONO_RGCTX_INFO_METHOD);
6983 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6987 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6988 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6989 MONO_ADD_INS (bblock, store);
6991 /* FIXME: This should be a managed pointer */
6992 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6994 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6995 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6996 cmethod, MONO_RGCTX_INFO_METHOD);
6997 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6998 addr = mono_emit_jit_icall (cfg,
6999 mono_helper_compile_generic_method, iargs);
7001 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7003 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7006 if (!MONO_TYPE_IS_VOID (fsig->ret))
7007 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7009 CHECK_CFG_EXCEPTION;
7017 * Implement a workaround for the inherent races involved in locking:
7023 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7024 * try block, the Exit () won't be executed, see:
7025 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7026 * To work around this, we extend such try blocks to include the last x bytes
7027 * of the Monitor.Enter () call.
7029 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7030 MonoBasicBlock *tbb;
7032 GET_BBLOCK (cfg, tbb, ip + 5);
7034 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7035 * from Monitor.Enter like ArgumentNullException.
7037 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7038 /* Mark this bblock as needing to be extended */
7039 tbb->extend_try_block = TRUE;
7043 /* Conversion to a JIT intrinsic */
7044 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7046 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7047 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7052 CHECK_CFG_EXCEPTION;
7060 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7061 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7062 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7063 !g_list_find (dont_inline, cmethod)) {
7065 gboolean always = FALSE;
7067 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7068 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7069 /* Prevent inlining of methods that call wrappers */
7071 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7075 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
7077 cfg->real_offset += 5;
7080 if (!MONO_TYPE_IS_VOID (fsig->ret))
7081 /* *sp is already set by inline_method */
7084 inline_costs += costs;
7090 inline_costs += 10 * num_calls++;
7092 /* Tail recursion elimination */
7093 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7094 gboolean has_vtargs = FALSE;
7097 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7100 /* keep it simple */
7101 for (i = fsig->param_count - 1; i >= 0; i--) {
7102 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7107 for (i = 0; i < n; ++i)
7108 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7109 MONO_INST_NEW (cfg, ins, OP_BR);
7110 MONO_ADD_INS (bblock, ins);
7111 tblock = start_bblock->out_bb [0];
7112 link_bblock (cfg, bblock, tblock);
7113 ins->inst_target_bb = tblock;
7114 start_new_bblock = 1;
7116 /* skip the CEE_RET, too */
7117 if (ip_in_bb (cfg, bblock, ip + 5))
7127 /* Generic sharing */
7128 /* FIXME: only do this for generic methods if
7129 they are not shared! */
7130 if (context_used && !imt_arg && !array_rank &&
7131 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7132 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7133 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7134 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7137 g_assert (cfg->generic_sharing_context && cmethod);
7141 * We are compiling a call to a
7142 * generic method from shared code,
7143 * which means that we have to look up
7144 * the method in the rgctx and do an
7147 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7150 /* Indirect calls */
7152 g_assert (!imt_arg);
7154 if (*ip == CEE_CALL)
7155 g_assert (context_used);
7156 else if (*ip == CEE_CALLI)
7157 g_assert (!vtable_arg);
7159 /* FIXME: what the hell is this??? */
7160 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7161 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7163 /* Prevent inlining of methods with indirect calls */
7167 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7169 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7171 * Instead of emitting an indirect call, emit a direct call
7172 * with the contents of the aotconst as the patch info.
7174 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7176 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7177 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7180 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7183 if (!MONO_TYPE_IS_VOID (fsig->ret))
7184 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7186 CHECK_CFG_EXCEPTION;
7197 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7198 MonoInst *val = sp [fsig->param_count];
7200 if (val->type == STACK_OBJ) {
7201 MonoInst *iargs [2];
7206 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7209 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7210 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7211 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7212 emit_write_barrier (cfg, addr, val, 0);
7213 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7214 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7216 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7219 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7220 if (!cmethod->klass->element_class->valuetype && !readonly)
7221 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7222 CHECK_TYPELOAD (cmethod->klass);
7225 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7228 g_assert_not_reached ();
7231 CHECK_CFG_EXCEPTION;
7238 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7240 if (!MONO_TYPE_IS_VOID (fsig->ret))
7241 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7243 CHECK_CFG_EXCEPTION;
7250 /* Tail prefix / tail call optimization */
7252 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7253 /* FIXME: runtime generic context pointer for jumps? */
7254 /* FIXME: handle this for generic sharing eventually */
7255 supported_tail_call = cmethod &&
7256 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7257 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7258 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7260 if (supported_tail_call) {
7263 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7266 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7268 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7269 /* Handle tail calls similarly to calls */
7270 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7272 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7273 call->tail_call = TRUE;
7274 call->method = cmethod;
7275 call->signature = mono_method_signature (cmethod);
7278 * We implement tail calls by storing the actual arguments into the
7279 * argument variables, then emitting a CEE_JMP.
7281 for (i = 0; i < n; ++i) {
7282 /* Prevent argument from being register allocated */
7283 arg_array [i]->flags |= MONO_INST_VOLATILE;
7284 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7288 ins = (MonoInst*)call;
7289 ins->inst_p0 = cmethod;
7290 ins->inst_p1 = arg_array [0];
7291 MONO_ADD_INS (bblock, ins);
7292 link_bblock (cfg, bblock, end_bblock);
7293 start_new_bblock = 1;
7295 CHECK_CFG_EXCEPTION;
7300 // FIXME: Eliminate unreachable epilogs
7303 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7304 * only reachable from this call.
7306 GET_BBLOCK (cfg, tblock, ip);
7307 if (tblock == bblock || tblock->in_count == 0)
7314 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7315 imt_arg, vtable_arg);
7317 if (!MONO_TYPE_IS_VOID (fsig->ret))
7318 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7320 CHECK_CFG_EXCEPTION;
7327 if (cfg->method != method) {
7328 /* return from inlined method */
7330 * If in_count == 0, that means the ret is unreachable due to
7331 * being preceded by a throw. In that case, inline_method () will
7332 * handle setting the return value
7333 * (test case: test_0_inline_throw ()).
7335 if (return_var && cfg->cbb->in_count) {
7339 //g_assert (returnvar != -1);
7340 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7341 cfg->ret_var_set = TRUE;
7345 MonoType *ret_type = mono_method_signature (method)->ret;
7349 * Place a seq point here too even through the IL stack is not
7350 * empty, so a step over on
7353 * will work correctly.
7355 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7356 MONO_ADD_INS (cfg->cbb, ins);
7359 g_assert (!return_var);
7363 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7366 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7369 if (!cfg->vret_addr) {
7372 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7374 EMIT_NEW_RETLOADA (cfg, ret_addr);
7376 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7377 ins->klass = mono_class_from_mono_type (ret_type);
7380 #ifdef MONO_ARCH_SOFT_FLOAT
7381 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7382 MonoInst *iargs [1];
7386 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7387 mono_arch_emit_setret (cfg, method, conv);
7389 mono_arch_emit_setret (cfg, method, *sp);
7392 mono_arch_emit_setret (cfg, method, *sp);
7397 if (sp != stack_start)
7399 MONO_INST_NEW (cfg, ins, OP_BR);
7401 ins->inst_target_bb = end_bblock;
7402 MONO_ADD_INS (bblock, ins);
7403 link_bblock (cfg, bblock, end_bblock);
7404 start_new_bblock = 1;
7408 MONO_INST_NEW (cfg, ins, OP_BR);
7410 target = ip + 1 + (signed char)(*ip);
7412 GET_BBLOCK (cfg, tblock, target);
7413 link_bblock (cfg, bblock, tblock);
7414 ins->inst_target_bb = tblock;
7415 if (sp != stack_start) {
7416 handle_stack_args (cfg, stack_start, sp - stack_start);
7418 CHECK_UNVERIFIABLE (cfg);
7420 MONO_ADD_INS (bblock, ins);
7421 start_new_bblock = 1;
7422 inline_costs += BRANCH_COST;
7436 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7438 target = ip + 1 + *(signed char*)ip;
7444 inline_costs += BRANCH_COST;
7448 MONO_INST_NEW (cfg, ins, OP_BR);
7451 target = ip + 4 + (gint32)read32(ip);
7453 GET_BBLOCK (cfg, tblock, target);
7454 link_bblock (cfg, bblock, tblock);
7455 ins->inst_target_bb = tblock;
7456 if (sp != stack_start) {
7457 handle_stack_args (cfg, stack_start, sp - stack_start);
7459 CHECK_UNVERIFIABLE (cfg);
7462 MONO_ADD_INS (bblock, ins);
7464 start_new_bblock = 1;
7465 inline_costs += BRANCH_COST;
7472 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7473 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7474 guint32 opsize = is_short ? 1 : 4;
7476 CHECK_OPSIZE (opsize);
7478 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7481 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7486 GET_BBLOCK (cfg, tblock, target);
7487 link_bblock (cfg, bblock, tblock);
7488 GET_BBLOCK (cfg, tblock, ip);
7489 link_bblock (cfg, bblock, tblock);
7491 if (sp != stack_start) {
7492 handle_stack_args (cfg, stack_start, sp - stack_start);
7493 CHECK_UNVERIFIABLE (cfg);
7496 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7497 cmp->sreg1 = sp [0]->dreg;
7498 type_from_op (cmp, sp [0], NULL);
7501 #if SIZEOF_REGISTER == 4
7502 if (cmp->opcode == OP_LCOMPARE_IMM) {
7503 /* Convert it to OP_LCOMPARE */
7504 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7505 ins->type = STACK_I8;
7506 ins->dreg = alloc_dreg (cfg, STACK_I8);
7508 MONO_ADD_INS (bblock, ins);
7509 cmp->opcode = OP_LCOMPARE;
7510 cmp->sreg2 = ins->dreg;
7513 MONO_ADD_INS (bblock, cmp);
7515 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7516 type_from_op (ins, sp [0], NULL);
7517 MONO_ADD_INS (bblock, ins);
7518 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7519 GET_BBLOCK (cfg, tblock, target);
7520 ins->inst_true_bb = tblock;
7521 GET_BBLOCK (cfg, tblock, ip);
7522 ins->inst_false_bb = tblock;
7523 start_new_bblock = 2;
7526 inline_costs += BRANCH_COST;
7541 MONO_INST_NEW (cfg, ins, *ip);
7543 target = ip + 4 + (gint32)read32(ip);
7549 inline_costs += BRANCH_COST;
7553 MonoBasicBlock **targets;
7554 MonoBasicBlock *default_bblock;
7555 MonoJumpInfoBBTable *table;
7556 int offset_reg = alloc_preg (cfg);
7557 int target_reg = alloc_preg (cfg);
7558 int table_reg = alloc_preg (cfg);
7559 int sum_reg = alloc_preg (cfg);
7560 gboolean use_op_switch;
7564 n = read32 (ip + 1);
7567 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7571 CHECK_OPSIZE (n * sizeof (guint32));
7572 target = ip + n * sizeof (guint32);
7574 GET_BBLOCK (cfg, default_bblock, target);
7575 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7577 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7578 for (i = 0; i < n; ++i) {
7579 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7580 targets [i] = tblock;
7581 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7585 if (sp != stack_start) {
7587 * Link the current bb with the targets as well, so handle_stack_args
7588 * will set their in_stack correctly.
7590 link_bblock (cfg, bblock, default_bblock);
7591 for (i = 0; i < n; ++i)
7592 link_bblock (cfg, bblock, targets [i]);
7594 handle_stack_args (cfg, stack_start, sp - stack_start);
7596 CHECK_UNVERIFIABLE (cfg);
7599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7600 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7603 for (i = 0; i < n; ++i)
7604 link_bblock (cfg, bblock, targets [i]);
7606 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7607 table->table = targets;
7608 table->table_size = n;
7610 use_op_switch = FALSE;
7612 /* ARM implements SWITCH statements differently */
7613 /* FIXME: Make it use the generic implementation */
7614 if (!cfg->compile_aot)
7615 use_op_switch = TRUE;
7618 if (COMPILE_LLVM (cfg))
7619 use_op_switch = TRUE;
7621 cfg->cbb->has_jump_table = 1;
7623 if (use_op_switch) {
7624 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7625 ins->sreg1 = src1->dreg;
7626 ins->inst_p0 = table;
7627 ins->inst_many_bb = targets;
7628 ins->klass = GUINT_TO_POINTER (n);
7629 MONO_ADD_INS (cfg->cbb, ins);
7631 if (sizeof (gpointer) == 8)
7632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7636 #if SIZEOF_REGISTER == 8
7637 /* The upper word might not be zero, and we add it to a 64 bit address later */
7638 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7641 if (cfg->compile_aot) {
7642 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7644 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7645 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7646 ins->inst_p0 = table;
7647 ins->dreg = table_reg;
7648 MONO_ADD_INS (cfg->cbb, ins);
7651 /* FIXME: Use load_memindex */
7652 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7653 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7654 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7656 start_new_bblock = 1;
7657 inline_costs += (BRANCH_COST * 2);
7677 dreg = alloc_freg (cfg);
7680 dreg = alloc_lreg (cfg);
7683 dreg = alloc_ireg_ref (cfg);
7686 dreg = alloc_preg (cfg);
7689 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7690 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7691 ins->flags |= ins_flag;
7693 MONO_ADD_INS (bblock, ins);
7695 if (ins->flags & MONO_INST_VOLATILE) {
7696 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
7697 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7698 emit_memory_barrier (cfg, FullBarrier);
7713 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7714 ins->flags |= ins_flag;
7717 if (ins->flags & MONO_INST_VOLATILE) {
7718 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
7719 /* FIXME it's questionable if release semantics require full barrier or just StoreStore */
7720 emit_memory_barrier (cfg, FullBarrier);
7723 MONO_ADD_INS (bblock, ins);
7725 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7726 emit_write_barrier (cfg, sp [0], sp [1], -1);
7735 MONO_INST_NEW (cfg, ins, (*ip));
7737 ins->sreg1 = sp [0]->dreg;
7738 ins->sreg2 = sp [1]->dreg;
7739 type_from_op (ins, sp [0], sp [1]);
7741 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7743 /* Use the immediate opcodes if possible */
7744 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7745 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7746 if (imm_opcode != -1) {
7747 ins->opcode = imm_opcode;
7748 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7751 sp [1]->opcode = OP_NOP;
7755 MONO_ADD_INS ((cfg)->cbb, (ins));
7757 *sp++ = mono_decompose_opcode (cfg, ins);
7774 MONO_INST_NEW (cfg, ins, (*ip));
7776 ins->sreg1 = sp [0]->dreg;
7777 ins->sreg2 = sp [1]->dreg;
7778 type_from_op (ins, sp [0], sp [1]);
7780 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7781 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7783 /* FIXME: Pass opcode to is_inst_imm */
7785 /* Use the immediate opcodes if possible */
7786 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7789 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7790 if (imm_opcode != -1) {
7791 ins->opcode = imm_opcode;
7792 if (sp [1]->opcode == OP_I8CONST) {
7793 #if SIZEOF_REGISTER == 8
7794 ins->inst_imm = sp [1]->inst_l;
7796 ins->inst_ls_word = sp [1]->inst_ls_word;
7797 ins->inst_ms_word = sp [1]->inst_ms_word;
7801 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7804 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7805 if (sp [1]->next == NULL)
7806 sp [1]->opcode = OP_NOP;
7809 MONO_ADD_INS ((cfg)->cbb, (ins));
7811 *sp++ = mono_decompose_opcode (cfg, ins);
7824 case CEE_CONV_OVF_I8:
7825 case CEE_CONV_OVF_U8:
7829 /* Special case this earlier so we have long constants in the IR */
7830 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7831 int data = sp [-1]->inst_c0;
7832 sp [-1]->opcode = OP_I8CONST;
7833 sp [-1]->type = STACK_I8;
7834 #if SIZEOF_REGISTER == 8
7835 if ((*ip) == CEE_CONV_U8)
7836 sp [-1]->inst_c0 = (guint32)data;
7838 sp [-1]->inst_c0 = data;
7840 sp [-1]->inst_ls_word = data;
7841 if ((*ip) == CEE_CONV_U8)
7842 sp [-1]->inst_ms_word = 0;
7844 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7846 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7853 case CEE_CONV_OVF_I4:
7854 case CEE_CONV_OVF_I1:
7855 case CEE_CONV_OVF_I2:
7856 case CEE_CONV_OVF_I:
7857 case CEE_CONV_OVF_U:
7860 if (sp [-1]->type == STACK_R8) {
7861 ADD_UNOP (CEE_CONV_OVF_I8);
7868 case CEE_CONV_OVF_U1:
7869 case CEE_CONV_OVF_U2:
7870 case CEE_CONV_OVF_U4:
7873 if (sp [-1]->type == STACK_R8) {
7874 ADD_UNOP (CEE_CONV_OVF_U8);
7881 case CEE_CONV_OVF_I1_UN:
7882 case CEE_CONV_OVF_I2_UN:
7883 case CEE_CONV_OVF_I4_UN:
7884 case CEE_CONV_OVF_I8_UN:
7885 case CEE_CONV_OVF_U1_UN:
7886 case CEE_CONV_OVF_U2_UN:
7887 case CEE_CONV_OVF_U4_UN:
7888 case CEE_CONV_OVF_U8_UN:
7889 case CEE_CONV_OVF_I_UN:
7890 case CEE_CONV_OVF_U_UN:
7897 CHECK_CFG_EXCEPTION;
7901 case CEE_ADD_OVF_UN:
7903 case CEE_MUL_OVF_UN:
7905 case CEE_SUB_OVF_UN:
7913 token = read32 (ip + 1);
7914 klass = mini_get_class (method, token, generic_context);
7915 CHECK_TYPELOAD (klass);
7917 if (generic_class_is_reference_type (cfg, klass)) {
7918 MonoInst *store, *load;
7919 int dreg = alloc_ireg_ref (cfg);
7921 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7922 load->flags |= ins_flag;
7923 MONO_ADD_INS (cfg->cbb, load);
7925 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7926 store->flags |= ins_flag;
7927 MONO_ADD_INS (cfg->cbb, store);
7929 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7930 emit_write_barrier (cfg, sp [0], sp [1], -1);
7932 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7944 token = read32 (ip + 1);
7945 klass = mini_get_class (method, token, generic_context);
7946 CHECK_TYPELOAD (klass);
7948 /* Optimize the common ldobj+stloc combination */
7958 loc_index = ip [5] - CEE_STLOC_0;
7965 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7966 CHECK_LOCAL (loc_index);
7968 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7969 ins->dreg = cfg->locals [loc_index]->dreg;
7975 /* Optimize the ldobj+stobj combination */
7976 /* The reference case ends up being a load+store anyway */
7977 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7982 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7989 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7998 CHECK_STACK_OVF (1);
8000 n = read32 (ip + 1);
8002 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8003 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8004 ins->type = STACK_OBJ;
8007 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8008 MonoInst *iargs [1];
8010 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8011 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8013 if (cfg->opt & MONO_OPT_SHARED) {
8014 MonoInst *iargs [3];
8016 if (cfg->compile_aot) {
8017 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8019 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8020 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8021 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8022 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8023 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8025 if (bblock->out_of_line) {
8026 MonoInst *iargs [2];
8028 if (image == mono_defaults.corlib) {
8030 * Avoid relocations in AOT and save some space by using a
8031 * version of helper_ldstr specialized to mscorlib.
8033 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8034 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8036 /* Avoid creating the string object */
8037 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8038 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8039 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8043 if (cfg->compile_aot) {
8044 NEW_LDSTRCONST (cfg, ins, image, n);
8046 MONO_ADD_INS (bblock, ins);
8049 NEW_PCONST (cfg, ins, NULL);
8050 ins->type = STACK_OBJ;
8051 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8053 OUT_OF_MEMORY_FAILURE;
8056 MONO_ADD_INS (bblock, ins);
8065 MonoInst *iargs [2];
8066 MonoMethodSignature *fsig;
8069 MonoInst *vtable_arg = NULL;
8072 token = read32 (ip + 1);
8073 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8074 if (!cmethod || mono_loader_get_last_error ())
8076 fsig = mono_method_get_signature (cmethod, image, token);
8080 mono_save_token_info (cfg, image, token, cmethod);
8082 if (!mono_class_init (cmethod->klass))
8085 if (cfg->generic_sharing_context)
8086 context_used = mono_method_check_context_used (cmethod);
8088 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8089 if (check_linkdemand (cfg, method, cmethod))
8091 CHECK_CFG_EXCEPTION;
8092 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8093 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8096 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8097 emit_generic_class_init (cfg, cmethod->klass);
8098 CHECK_TYPELOAD (cmethod->klass);
8101 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8102 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8103 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8104 mono_class_vtable (cfg->domain, cmethod->klass);
8105 CHECK_TYPELOAD (cmethod->klass);
8107 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8108 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8111 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8112 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8114 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8116 CHECK_TYPELOAD (cmethod->klass);
8117 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8122 n = fsig->param_count;
8126 * Generate smaller code for the common newobj <exception> instruction in
8127 * argument checking code.
8129 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8130 is_exception_class (cmethod->klass) && n <= 2 &&
8131 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8132 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8133 MonoInst *iargs [3];
8135 g_assert (!vtable_arg);
8139 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8142 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8146 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8151 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8154 g_assert_not_reached ();
8162 /* move the args to allow room for 'this' in the first position */
8168 /* check_call_signature () requires sp[0] to be set */
8169 this_ins.type = STACK_OBJ;
8171 if (check_call_signature (cfg, fsig, sp))
8176 if (mini_class_is_system_array (cmethod->klass)) {
8177 g_assert (!vtable_arg);
8179 *sp = emit_get_rgctx_method (cfg, context_used,
8180 cmethod, MONO_RGCTX_INFO_METHOD);
8182 /* Avoid varargs in the common case */
8183 if (fsig->param_count == 1)
8184 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8185 else if (fsig->param_count == 2)
8186 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8187 else if (fsig->param_count == 3)
8188 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8190 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8191 } else if (cmethod->string_ctor) {
8192 g_assert (!context_used);
8193 g_assert (!vtable_arg);
8194 /* we simply pass a null pointer */
8195 EMIT_NEW_PCONST (cfg, *sp, NULL);
8196 /* now call the string ctor */
8197 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8199 MonoInst* callvirt_this_arg = NULL;
8201 if (cmethod->klass->valuetype) {
8202 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8203 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8204 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8209 * The code generated by mini_emit_virtual_call () expects
8210 * iargs [0] to be a boxed instance, but luckily the vcall
8211 * will be transformed into a normal call there.
8213 } else if (context_used) {
8214 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8217 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8219 CHECK_TYPELOAD (cmethod->klass);
8222 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8223 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8224 * As a workaround, we call class cctors before allocating objects.
8226 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8227 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8228 if (cfg->verbose_level > 2)
8229 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8230 class_inits = g_slist_prepend (class_inits, vtable);
8233 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8236 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8239 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8241 /* Now call the actual ctor */
8242 /* Avoid virtual calls to ctors if possible */
8243 if (cmethod->klass->marshalbyref)
8244 callvirt_this_arg = sp [0];
8247 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8248 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8249 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8254 CHECK_CFG_EXCEPTION;
8255 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8256 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8257 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8258 !g_list_find (dont_inline, cmethod)) {
8261 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8262 cfg->real_offset += 5;
8265 inline_costs += costs - 5;
8268 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8270 } else if (context_used &&
8271 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8272 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8273 MonoInst *cmethod_addr;
8275 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8276 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8278 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8281 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8282 callvirt_this_arg, NULL, vtable_arg);
8286 if (alloc == NULL) {
8288 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8289 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8303 token = read32 (ip + 1);
8304 klass = mini_get_class (method, token, generic_context);
8305 CHECK_TYPELOAD (klass);
8306 if (sp [0]->type != STACK_OBJ)
8309 if (cfg->generic_sharing_context)
8310 context_used = mono_class_check_context_used (klass);
8312 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8313 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8320 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8323 if (cfg->compile_aot)
8324 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8326 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8328 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8329 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8332 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8333 MonoMethod *mono_castclass;
8334 MonoInst *iargs [1];
8337 mono_castclass = mono_marshal_get_castclass (klass);
8340 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8341 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8342 CHECK_CFG_EXCEPTION;
8343 g_assert (costs > 0);
8346 cfg->real_offset += 5;
8351 inline_costs += costs;
8354 ins = handle_castclass (cfg, klass, *sp, context_used);
8355 CHECK_CFG_EXCEPTION;
8365 token = read32 (ip + 1);
8366 klass = mini_get_class (method, token, generic_context);
8367 CHECK_TYPELOAD (klass);
8368 if (sp [0]->type != STACK_OBJ)
8371 if (cfg->generic_sharing_context)
8372 context_used = mono_class_check_context_used (klass);
8374 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8375 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8382 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8385 if (cfg->compile_aot)
8386 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8388 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8390 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8393 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8394 MonoMethod *mono_isinst;
8395 MonoInst *iargs [1];
8398 mono_isinst = mono_marshal_get_isinst (klass);
8401 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8402 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8403 CHECK_CFG_EXCEPTION;
8404 g_assert (costs > 0);
8407 cfg->real_offset += 5;
8412 inline_costs += costs;
8415 ins = handle_isinst (cfg, klass, *sp, context_used);
8416 CHECK_CFG_EXCEPTION;
8423 case CEE_UNBOX_ANY: {
8427 token = read32 (ip + 1);
8428 klass = mini_get_class (method, token, generic_context);
8429 CHECK_TYPELOAD (klass);
8431 mono_save_token_info (cfg, image, token, klass);
8433 if (cfg->generic_sharing_context)
8434 context_used = mono_class_check_context_used (klass);
8436 if (generic_class_is_reference_type (cfg, klass)) {
8437 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8438 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8439 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8446 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8449 /*FIXME AOT support*/
8450 if (cfg->compile_aot)
8451 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8453 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8455 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8456 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8459 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8460 MonoMethod *mono_castclass;
8461 MonoInst *iargs [1];
8464 mono_castclass = mono_marshal_get_castclass (klass);
8467 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8468 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8469 CHECK_CFG_EXCEPTION;
8470 g_assert (costs > 0);
8473 cfg->real_offset += 5;
8477 inline_costs += costs;
8479 ins = handle_castclass (cfg, klass, *sp, context_used);
8480 CHECK_CFG_EXCEPTION;
8488 if (mono_class_is_nullable (klass)) {
8489 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8496 ins = handle_unbox (cfg, klass, sp, context_used);
8502 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8515 token = read32 (ip + 1);
8516 klass = mini_get_class (method, token, generic_context);
8517 CHECK_TYPELOAD (klass);
8519 mono_save_token_info (cfg, image, token, klass);
8521 if (cfg->generic_sharing_context)
8522 context_used = mono_class_check_context_used (klass);
8524 if (generic_class_is_reference_type (cfg, klass)) {
8530 if (klass == mono_defaults.void_class)
8532 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8534 /* frequent check in generic code: box (struct), brtrue */
8536 // FIXME: LLVM can't handle the inconsistent bb linking
8537 if (!mono_class_is_nullable (klass) &&
8538 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8539 (ip [5] == CEE_BRTRUE ||
8540 ip [5] == CEE_BRTRUE_S ||
8541 ip [5] == CEE_BRFALSE ||
8542 ip [5] == CEE_BRFALSE_S)) {
8543 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8545 MonoBasicBlock *true_bb, *false_bb;
8549 if (cfg->verbose_level > 3) {
8550 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8551 printf ("<box+brtrue opt>\n");
8559 target = ip + 1 + (signed char)(*ip);
8566 target = ip + 4 + (gint)(read32 (ip));
8570 g_assert_not_reached ();
8574 * We need to link both bblocks, since it is needed for handling stack
8575 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8576 * Branching to only one of them would lead to inconsistencies, so
8577 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8579 GET_BBLOCK (cfg, true_bb, target);
8580 GET_BBLOCK (cfg, false_bb, ip);
8582 mono_link_bblock (cfg, cfg->cbb, true_bb);
8583 mono_link_bblock (cfg, cfg->cbb, false_bb);
8585 if (sp != stack_start) {
8586 handle_stack_args (cfg, stack_start, sp - stack_start);
8588 CHECK_UNVERIFIABLE (cfg);
8591 if (COMPILE_LLVM (cfg)) {
8592 dreg = alloc_ireg (cfg);
8593 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8596 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8598 /* The JIT can't eliminate the iconst+compare */
8599 MONO_INST_NEW (cfg, ins, OP_BR);
8600 ins->inst_target_bb = is_true ? true_bb : false_bb;
8601 MONO_ADD_INS (cfg->cbb, ins);
8604 start_new_bblock = 1;
8608 *sp++ = handle_box (cfg, val, klass, context_used);
8610 CHECK_CFG_EXCEPTION;
8619 token = read32 (ip + 1);
8620 klass = mini_get_class (method, token, generic_context);
8621 CHECK_TYPELOAD (klass);
8623 mono_save_token_info (cfg, image, token, klass);
8625 if (cfg->generic_sharing_context)
8626 context_used = mono_class_check_context_used (klass);
8628 if (mono_class_is_nullable (klass)) {
8631 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8632 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8636 ins = handle_unbox (cfg, klass, sp, context_used);
8649 MonoClassField *field;
8652 gboolean is_instance;
8654 gpointer addr = NULL;
8655 gboolean is_special_static;
8657 MonoInst *store_val = NULL;
8660 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
8662 if (op == CEE_STFLD) {
8670 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8672 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8675 if (op == CEE_STSFLD) {
8683 token = read32 (ip + 1);
8684 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8685 field = mono_method_get_wrapper_data (method, token);
8686 klass = field->parent;
8689 field = mono_field_from_token (image, token, &klass, generic_context);
8693 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8694 FIELD_ACCESS_FAILURE;
8695 mono_class_init (klass);
8697 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
8700 /* if the class is Critical then transparent code cannot access its fields */
8701 if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8702 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8704 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8705 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8706 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8707 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8711 * LDFLD etc. is usable on static fields as well, so convert those cases to
8714 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
8726 g_assert_not_reached ();
8728 is_instance = FALSE;
8733 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8734 if (op == CEE_STFLD) {
8735 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8737 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8738 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8739 MonoInst *iargs [5];
8742 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8743 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8744 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8748 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8749 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8750 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8751 CHECK_CFG_EXCEPTION;
8752 g_assert (costs > 0);
8754 cfg->real_offset += 5;
8757 inline_costs += costs;
8759 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8764 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8766 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8767 if (sp [0]->opcode != OP_LDADDR)
8768 store->flags |= MONO_INST_FAULT;
8770 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8771 /* insert call to write barrier */
8775 dreg = alloc_ireg_mp (cfg);
8776 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8777 emit_write_barrier (cfg, ptr, sp [1], -1);
8780 store->flags |= ins_flag;
8787 if (is_instance && ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class)) {
8788 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8789 MonoInst *iargs [4];
8792 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8793 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8794 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8795 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8796 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8797 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8798 CHECK_CFG_EXCEPTION;
8800 g_assert (costs > 0);
8802 cfg->real_offset += 5;
8806 inline_costs += costs;
8808 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8811 } else if (is_instance) {
8812 if (sp [0]->type == STACK_VTYPE) {
8815 /* Have to compute the address of the variable */
8817 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8819 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8821 g_assert (var->klass == klass);
8823 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8827 if (op == CEE_LDFLDA) {
8828 if (is_magic_tls_access (field)) {
8830 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
8832 if (sp [0]->type == STACK_OBJ) {
8833 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8834 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8837 dreg = alloc_ireg_mp (cfg);
8839 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8840 ins->klass = mono_class_from_mono_type (field->type);
8841 ins->type = STACK_MP;
8847 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8849 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8850 load->flags |= ins_flag;
8851 if (sp [0]->opcode != OP_LDADDR)
8852 load->flags |= MONO_INST_FAULT;
8866 * We can only support shared generic static
8867 * field access on architectures where the
8868 * trampoline code has been extended to handle
8869 * the generic class init.
8871 #ifndef MONO_ARCH_VTABLE_REG
8872 GENERIC_SHARING_FAILURE (op);
8875 if (cfg->generic_sharing_context)
8876 context_used = mono_class_check_context_used (klass);
8878 ftype = mono_field_get_type (field);
8880 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8882 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8883 * to be called here.
8885 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8886 mono_class_vtable (cfg->domain, klass);
8887 CHECK_TYPELOAD (klass);
8889 mono_domain_lock (cfg->domain);
8890 if (cfg->domain->special_static_fields)
8891 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8892 mono_domain_unlock (cfg->domain);
8894 is_special_static = mono_class_field_is_special_static (field);
8896 /* Generate IR to compute the field address */
8897 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8899 * Fast access to TLS data
8900 * Inline version of get_thread_static_data () in
8904 int idx, static_data_reg, array_reg, dreg;
8905 MonoInst *thread_ins;
8907 // offset &= 0x7fffffff;
8908 // idx = (offset >> 24) - 1;
8909 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8911 thread_ins = mono_get_thread_intrinsic (cfg);
8912 MONO_ADD_INS (cfg->cbb, thread_ins);
8913 static_data_reg = alloc_ireg (cfg);
8914 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8916 if (cfg->compile_aot) {
8917 int offset_reg, offset2_reg, idx_reg;
8919 /* For TLS variables, this will return the TLS offset */
8920 EMIT_NEW_SFLDACONST (cfg, ins, field);
8921 offset_reg = ins->dreg;
8922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8923 idx_reg = alloc_ireg (cfg);
8924 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8925 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8927 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8928 array_reg = alloc_ireg (cfg);
8929 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8930 offset2_reg = alloc_ireg (cfg);
8931 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8932 dreg = alloc_ireg (cfg);
8933 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8935 offset = (gsize)addr & 0x7fffffff;
8936 idx = (offset >> 24) - 1;
8938 array_reg = alloc_ireg (cfg);
8939 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8940 dreg = alloc_ireg (cfg);
8941 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8943 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8944 (cfg->compile_aot && is_special_static) ||
8945 (context_used && is_special_static)) {
8946 MonoInst *iargs [2];
8948 g_assert (field->parent);
8949 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8951 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8952 field, MONO_RGCTX_INFO_CLASS_FIELD);
8954 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8956 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8957 } else if (context_used) {
8958 MonoInst *static_data;
8961 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8962 method->klass->name_space, method->klass->name, method->name,
8963 depth, field->offset);
8966 if (mono_class_needs_cctor_run (klass, method))
8967 emit_generic_class_init (cfg, klass);
8970 * The pointer we're computing here is
8972 * super_info.static_data + field->offset
8974 static_data = emit_get_rgctx_klass (cfg, context_used,
8975 klass, MONO_RGCTX_INFO_STATIC_DATA);
8977 if (field->offset == 0) {
8980 int addr_reg = mono_alloc_preg (cfg);
8981 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8983 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8984 MonoInst *iargs [2];
8986 g_assert (field->parent);
8987 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8988 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8989 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8991 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8993 CHECK_TYPELOAD (klass);
8995 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
8996 if (!(g_slist_find (class_inits, vtable))) {
8997 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8998 if (cfg->verbose_level > 2)
8999 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9000 class_inits = g_slist_prepend (class_inits, vtable);
9003 if (cfg->run_cctors) {
9005 /* This makes so that inline cannot trigger */
9006 /* .cctors: too many apps depend on them */
9007 /* running with a specific order... */
9008 if (! vtable->initialized)
9010 ex = mono_runtime_class_init_full (vtable, FALSE);
9012 set_exception_object (cfg, ex);
9013 goto exception_exit;
9017 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9019 if (cfg->compile_aot)
9020 EMIT_NEW_SFLDACONST (cfg, ins, field);
9022 EMIT_NEW_PCONST (cfg, ins, addr);
9024 MonoInst *iargs [1];
9025 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9026 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9030 /* Generate IR to do the actual load/store operation */
9032 if (op == CEE_LDSFLDA) {
9033 ins->klass = mono_class_from_mono_type (ftype);
9034 ins->type = STACK_PTR;
9036 } else if (op == CEE_STSFLD) {
9039 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9040 store->flags |= ins_flag;
9042 gboolean is_const = FALSE;
9043 MonoVTable *vtable = NULL;
9044 gpointer addr = NULL;
9046 if (!context_used) {
9047 vtable = mono_class_vtable (cfg->domain, klass);
9048 CHECK_TYPELOAD (klass);
9050 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9051 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9052 int ro_type = ftype->type;
9054 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9055 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
9056 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
9058 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9061 case MONO_TYPE_BOOLEAN:
9063 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9067 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9070 case MONO_TYPE_CHAR:
9072 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9076 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9081 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9085 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9091 case MONO_TYPE_FNPTR:
9092 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9093 type_to_eval_stack_type ((cfg), field->type, *sp);
9096 case MONO_TYPE_STRING:
9097 case MONO_TYPE_OBJECT:
9098 case MONO_TYPE_CLASS:
9099 case MONO_TYPE_SZARRAY:
9100 case MONO_TYPE_ARRAY:
9101 if (!mono_gc_is_moving ()) {
9102 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9103 type_to_eval_stack_type ((cfg), field->type, *sp);
9111 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9116 case MONO_TYPE_VALUETYPE:
9126 CHECK_STACK_OVF (1);
9128 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9129 load->flags |= ins_flag;
9142 token = read32 (ip + 1);
9143 klass = mini_get_class (method, token, generic_context);
9144 CHECK_TYPELOAD (klass);
9145 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9146 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9147 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9148 generic_class_is_reference_type (cfg, klass)) {
9149 /* insert call to write barrier */
9150 emit_write_barrier (cfg, sp [0], sp [1], -1);
9162 const char *data_ptr;
9164 guint32 field_token;
9170 token = read32 (ip + 1);
9172 klass = mini_get_class (method, token, generic_context);
9173 CHECK_TYPELOAD (klass);
9175 if (cfg->generic_sharing_context)
9176 context_used = mono_class_check_context_used (klass);
9178 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9179 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9180 ins->sreg1 = sp [0]->dreg;
9181 ins->type = STACK_I4;
9182 ins->dreg = alloc_ireg (cfg);
9183 MONO_ADD_INS (cfg->cbb, ins);
9184 *sp = mono_decompose_opcode (cfg, ins);
9189 MonoClass *array_class = mono_array_class_get (klass, 1);
9190 /* FIXME: we cannot get a managed
9191 allocator because we can't get the
9192 open generic class's vtable. We
9193 have the same problem in
9194 handle_alloc(). This
9195 needs to be solved so that we can
9196 have managed allocs of shared
9199 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9200 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9202 MonoMethod *managed_alloc = NULL;
9204 /* FIXME: Decompose later to help abcrem */
9207 args [0] = emit_get_rgctx_klass (cfg, context_used,
9208 array_class, MONO_RGCTX_INFO_VTABLE);
9213 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9215 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9217 if (cfg->opt & MONO_OPT_SHARED) {
9218 /* Decompose now to avoid problems with references to the domainvar */
9219 MonoInst *iargs [3];
9221 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9222 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9225 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9227 /* Decompose later since it is needed by abcrem */
9228 MonoClass *array_type = mono_array_class_get (klass, 1);
9229 mono_class_vtable (cfg->domain, array_type);
9230 CHECK_TYPELOAD (array_type);
9232 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9233 ins->dreg = alloc_ireg_ref (cfg);
9234 ins->sreg1 = sp [0]->dreg;
9235 ins->inst_newa_class = klass;
9236 ins->type = STACK_OBJ;
9238 MONO_ADD_INS (cfg->cbb, ins);
9239 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9240 cfg->cbb->has_array_access = TRUE;
9242 /* Needed so mono_emit_load_get_addr () gets called */
9243 mono_get_got_var (cfg);
9253 * we inline/optimize the initialization sequence if possible.
9254 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9255 * for small sizes open code the memcpy
9256 * ensure the rva field is big enough
9258 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9259 MonoMethod *memcpy_method = get_memcpy_method ();
9260 MonoInst *iargs [3];
9261 int add_reg = alloc_ireg_mp (cfg);
9263 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9264 if (cfg->compile_aot) {
9265 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9267 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9269 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9270 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9279 if (sp [0]->type != STACK_OBJ)
9282 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9283 ins->dreg = alloc_preg (cfg);
9284 ins->sreg1 = sp [0]->dreg;
9285 ins->type = STACK_I4;
9286 /* This flag will be inherited by the decomposition */
9287 ins->flags |= MONO_INST_FAULT;
9288 MONO_ADD_INS (cfg->cbb, ins);
9289 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9290 cfg->cbb->has_array_access = TRUE;
9298 if (sp [0]->type != STACK_OBJ)
9301 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9303 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9304 CHECK_TYPELOAD (klass);
9305 /* we need to make sure that this array is exactly the type it needs
9306 * to be for correctness. the wrappers are lax with their usage
9307 * so we need to ignore them here
9309 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9310 MonoClass *array_class = mono_array_class_get (klass, 1);
9311 mini_emit_check_array_type (cfg, sp [0], array_class);
9312 CHECK_TYPELOAD (array_class);
9316 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9331 case CEE_LDELEM_REF: {
9337 if (*ip == CEE_LDELEM) {
9339 token = read32 (ip + 1);
9340 klass = mini_get_class (method, token, generic_context);
9341 CHECK_TYPELOAD (klass);
9342 mono_class_init (klass);
9345 klass = array_access_to_klass (*ip);
9347 if (sp [0]->type != STACK_OBJ)
9350 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9352 if (sp [1]->opcode == OP_ICONST) {
9353 int array_reg = sp [0]->dreg;
9354 int index_reg = sp [1]->dreg;
9355 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9357 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9358 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9360 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9361 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9364 if (*ip == CEE_LDELEM)
9377 case CEE_STELEM_REF:
9384 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9386 if (*ip == CEE_STELEM) {
9388 token = read32 (ip + 1);
9389 klass = mini_get_class (method, token, generic_context);
9390 CHECK_TYPELOAD (klass);
9391 mono_class_init (klass);
9394 klass = array_access_to_klass (*ip);
9396 if (sp [0]->type != STACK_OBJ)
9399 /* storing a NULL doesn't need any of the complex checks in stelemref */
9400 if (generic_class_is_reference_type (cfg, klass) &&
9401 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9402 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9403 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9404 MonoInst *iargs [3];
9407 mono_class_setup_vtable (obj_array);
9408 g_assert (helper->slot);
9410 if (sp [0]->type != STACK_OBJ)
9412 if (sp [2]->type != STACK_OBJ)
9419 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9421 if (sp [1]->opcode == OP_ICONST) {
9422 int array_reg = sp [0]->dreg;
9423 int index_reg = sp [1]->dreg;
9424 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9426 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9427 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9429 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9430 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9434 if (*ip == CEE_STELEM)
9441 case CEE_CKFINITE: {
9445 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9446 ins->sreg1 = sp [0]->dreg;
9447 ins->dreg = alloc_freg (cfg);
9448 ins->type = STACK_R8;
9449 MONO_ADD_INS (bblock, ins);
9451 *sp++ = mono_decompose_opcode (cfg, ins);
9456 case CEE_REFANYVAL: {
9457 MonoInst *src_var, *src;
9459 int klass_reg = alloc_preg (cfg);
9460 int dreg = alloc_preg (cfg);
9463 MONO_INST_NEW (cfg, ins, *ip);
9466 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9467 CHECK_TYPELOAD (klass);
9468 mono_class_init (klass);
9470 if (cfg->generic_sharing_context)
9471 context_used = mono_class_check_context_used (klass);
9474 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9476 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9477 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9481 MonoInst *klass_ins;
9483 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9484 klass, MONO_RGCTX_INFO_KLASS);
9487 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9488 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9490 mini_emit_class_check (cfg, klass_reg, klass);
9492 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9493 ins->type = STACK_MP;
9498 case CEE_MKREFANY: {
9499 MonoInst *loc, *addr;
9502 MONO_INST_NEW (cfg, ins, *ip);
9505 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9506 CHECK_TYPELOAD (klass);
9507 mono_class_init (klass);
9509 if (cfg->generic_sharing_context)
9510 context_used = mono_class_check_context_used (klass);
9512 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9513 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9516 MonoInst *const_ins;
9517 int type_reg = alloc_preg (cfg);
9519 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9520 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9521 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9522 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9523 } else if (cfg->compile_aot) {
9524 int const_reg = alloc_preg (cfg);
9525 int type_reg = alloc_preg (cfg);
9527 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9528 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9529 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9530 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9532 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9533 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9535 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9537 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9538 ins->type = STACK_VTYPE;
9539 ins->klass = mono_defaults.typed_reference_class;
9546 MonoClass *handle_class;
9548 CHECK_STACK_OVF (1);
9551 n = read32 (ip + 1);
9553 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9554 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9555 handle = mono_method_get_wrapper_data (method, n);
9556 handle_class = mono_method_get_wrapper_data (method, n + 1);
9557 if (handle_class == mono_defaults.typehandle_class)
9558 handle = &((MonoClass*)handle)->byval_arg;
9561 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9565 mono_class_init (handle_class);
9566 if (cfg->generic_sharing_context) {
9567 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9568 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9569 /* This case handles ldtoken
9570 of an open type, like for
9573 } else if (handle_class == mono_defaults.typehandle_class) {
9574 /* If we get a MONO_TYPE_CLASS
9575 then we need to provide the
9577 instantiation of it. */
9578 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9581 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9582 } else if (handle_class == mono_defaults.fieldhandle_class)
9583 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9584 else if (handle_class == mono_defaults.methodhandle_class)
9585 context_used = mono_method_check_context_used (handle);
9587 g_assert_not_reached ();
9590 if ((cfg->opt & MONO_OPT_SHARED) &&
9591 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9592 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9593 MonoInst *addr, *vtvar, *iargs [3];
9594 int method_context_used;
9596 if (cfg->generic_sharing_context)
9597 method_context_used = mono_method_check_context_used (method);
9599 method_context_used = 0;
9601 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9603 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9604 EMIT_NEW_ICONST (cfg, iargs [1], n);
9605 if (method_context_used) {
9606 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9607 method, MONO_RGCTX_INFO_METHOD);
9608 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9610 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9611 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9613 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9615 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9617 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9619 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9620 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9621 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9622 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9623 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9624 MonoClass *tclass = mono_class_from_mono_type (handle);
9626 mono_class_init (tclass);
9628 ins = emit_get_rgctx_klass (cfg, context_used,
9629 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9630 } else if (cfg->compile_aot) {
9631 if (method->wrapper_type) {
9632 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9633 /* Special case for static synchronized wrappers */
9634 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9636 /* FIXME: n is not a normal token */
9637 cfg->disable_aot = TRUE;
9638 EMIT_NEW_PCONST (cfg, ins, NULL);
9641 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9644 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9646 ins->type = STACK_OBJ;
9647 ins->klass = cmethod->klass;
9650 MonoInst *addr, *vtvar;
9652 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9655 if (handle_class == mono_defaults.typehandle_class) {
9656 ins = emit_get_rgctx_klass (cfg, context_used,
9657 mono_class_from_mono_type (handle),
9658 MONO_RGCTX_INFO_TYPE);
9659 } else if (handle_class == mono_defaults.methodhandle_class) {
9660 ins = emit_get_rgctx_method (cfg, context_used,
9661 handle, MONO_RGCTX_INFO_METHOD);
9662 } else if (handle_class == mono_defaults.fieldhandle_class) {
9663 ins = emit_get_rgctx_field (cfg, context_used,
9664 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9666 g_assert_not_reached ();
9668 } else if (cfg->compile_aot) {
9669 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9671 EMIT_NEW_PCONST (cfg, ins, handle);
9673 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9675 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9685 MONO_INST_NEW (cfg, ins, OP_THROW);
9687 ins->sreg1 = sp [0]->dreg;
9689 bblock->out_of_line = TRUE;
9690 MONO_ADD_INS (bblock, ins);
9691 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9692 MONO_ADD_INS (bblock, ins);
9695 link_bblock (cfg, bblock, end_bblock);
9696 start_new_bblock = 1;
9698 case CEE_ENDFINALLY:
9699 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9700 MONO_ADD_INS (bblock, ins);
9702 start_new_bblock = 1;
9705 * Control will leave the method so empty the stack, otherwise
9706 * the next basic block will start with a nonempty stack.
9708 while (sp != stack_start) {
9716 if (*ip == CEE_LEAVE) {
9718 target = ip + 5 + (gint32)read32(ip + 1);
9721 target = ip + 2 + (signed char)(ip [1]);
9724 /* empty the stack */
9725 while (sp != stack_start) {
9730 * If this leave statement is in a catch block, check for a
9731 * pending exception, and rethrow it if necessary.
9732 * We avoid doing this in runtime invoke wrappers, since those are called
9733 * by native code which excepts the wrapper to catch all exceptions.
9735 for (i = 0; i < header->num_clauses; ++i) {
9736 MonoExceptionClause *clause = &header->clauses [i];
9739 * Use <= in the final comparison to handle clauses with multiple
9740 * leave statements, like in bug #78024.
9741 * The ordering of the exception clauses guarantees that we find the
9744 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9746 MonoBasicBlock *dont_throw;
9751 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9754 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9756 NEW_BBLOCK (cfg, dont_throw);
9759 * Currently, we always rethrow the abort exception, despite the
9760 * fact that this is not correct. See thread6.cs for an example.
9761 * But propagating the abort exception is more important than
9762 * getting the sematics right.
9764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9765 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9766 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9768 MONO_START_BB (cfg, dont_throw);
9773 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9775 MonoExceptionClause *clause;
9777 for (tmp = handlers; tmp; tmp = tmp->next) {
9779 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9781 link_bblock (cfg, bblock, tblock);
9782 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9783 ins->inst_target_bb = tblock;
9784 ins->inst_eh_block = clause;
9785 MONO_ADD_INS (bblock, ins);
9786 bblock->has_call_handler = 1;
9787 if (COMPILE_LLVM (cfg)) {
9788 MonoBasicBlock *target_bb;
9791 * Link the finally bblock with the target, since it will
9792 * conceptually branch there.
9793 * FIXME: Have to link the bblock containing the endfinally.
9795 GET_BBLOCK (cfg, target_bb, target);
9796 link_bblock (cfg, tblock, target_bb);
9799 g_list_free (handlers);
9802 MONO_INST_NEW (cfg, ins, OP_BR);
9803 MONO_ADD_INS (bblock, ins);
9804 GET_BBLOCK (cfg, tblock, target);
9805 link_bblock (cfg, bblock, tblock);
9806 ins->inst_target_bb = tblock;
9807 start_new_bblock = 1;
9809 if (*ip == CEE_LEAVE)
9818 * Mono specific opcodes
9820 case MONO_CUSTOM_PREFIX: {
9822 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9826 case CEE_MONO_ICALL: {
9828 MonoJitICallInfo *info;
9830 token = read32 (ip + 2);
9831 func = mono_method_get_wrapper_data (method, token);
9832 info = mono_find_jit_icall_by_addr (func);
9835 CHECK_STACK (info->sig->param_count);
9836 sp -= info->sig->param_count;
9838 ins = mono_emit_jit_icall (cfg, info->func, sp);
9839 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9843 inline_costs += 10 * num_calls++;
9847 case CEE_MONO_LDPTR: {
9850 CHECK_STACK_OVF (1);
9852 token = read32 (ip + 2);
9854 ptr = mono_method_get_wrapper_data (method, token);
9855 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9856 MonoJitICallInfo *callinfo;
9857 const char *icall_name;
9859 icall_name = method->name + strlen ("__icall_wrapper_");
9860 g_assert (icall_name);
9861 callinfo = mono_find_jit_icall_by_name (icall_name);
9862 g_assert (callinfo);
9864 if (ptr == callinfo->func) {
9865 /* Will be transformed into an AOTCONST later */
9866 EMIT_NEW_PCONST (cfg, ins, ptr);
9872 /* FIXME: Generalize this */
9873 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9874 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9879 EMIT_NEW_PCONST (cfg, ins, ptr);
9882 inline_costs += 10 * num_calls++;
9883 /* Can't embed random pointers into AOT code */
9884 cfg->disable_aot = 1;
9887 case CEE_MONO_ICALL_ADDR: {
9888 MonoMethod *cmethod;
9891 CHECK_STACK_OVF (1);
9893 token = read32 (ip + 2);
9895 cmethod = mono_method_get_wrapper_data (method, token);
9897 if (cfg->compile_aot) {
9898 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9900 ptr = mono_lookup_internal_call (cmethod);
9902 EMIT_NEW_PCONST (cfg, ins, ptr);
9908 case CEE_MONO_VTADDR: {
9909 MonoInst *src_var, *src;
9915 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9916 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9921 case CEE_MONO_NEWOBJ: {
9922 MonoInst *iargs [2];
9924 CHECK_STACK_OVF (1);
9926 token = read32 (ip + 2);
9927 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9928 mono_class_init (klass);
9929 NEW_DOMAINCONST (cfg, iargs [0]);
9930 MONO_ADD_INS (cfg->cbb, iargs [0]);
9931 NEW_CLASSCONST (cfg, iargs [1], klass);
9932 MONO_ADD_INS (cfg->cbb, iargs [1]);
9933 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9935 inline_costs += 10 * num_calls++;
9938 case CEE_MONO_OBJADDR:
9941 MONO_INST_NEW (cfg, ins, OP_MOVE);
9942 ins->dreg = alloc_ireg_mp (cfg);
9943 ins->sreg1 = sp [0]->dreg;
9944 ins->type = STACK_MP;
9945 MONO_ADD_INS (cfg->cbb, ins);
9949 case CEE_MONO_LDNATIVEOBJ:
9951 * Similar to LDOBJ, but instead load the unmanaged
9952 * representation of the vtype to the stack.
9957 token = read32 (ip + 2);
9958 klass = mono_method_get_wrapper_data (method, token);
9959 g_assert (klass->valuetype);
9960 mono_class_init (klass);
9963 MonoInst *src, *dest, *temp;
9966 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9967 temp->backend.is_pinvoke = 1;
9968 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9969 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9971 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9972 dest->type = STACK_VTYPE;
9973 dest->klass = klass;
9979 case CEE_MONO_RETOBJ: {
9981 * Same as RET, but return the native representation of a vtype
9984 g_assert (cfg->ret);
9985 g_assert (mono_method_signature (method)->pinvoke);
9990 token = read32 (ip + 2);
9991 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9993 if (!cfg->vret_addr) {
9994 g_assert (cfg->ret_var_is_local);
9996 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9998 EMIT_NEW_RETLOADA (cfg, ins);
10000 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10002 if (sp != stack_start)
10005 MONO_INST_NEW (cfg, ins, OP_BR);
10006 ins->inst_target_bb = end_bblock;
10007 MONO_ADD_INS (bblock, ins);
10008 link_bblock (cfg, bblock, end_bblock);
10009 start_new_bblock = 1;
10013 case CEE_MONO_CISINST:
10014 case CEE_MONO_CCASTCLASS: {
10019 token = read32 (ip + 2);
10020 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10021 if (ip [1] == CEE_MONO_CISINST)
10022 ins = handle_cisinst (cfg, klass, sp [0]);
10024 ins = handle_ccastclass (cfg, klass, sp [0]);
10030 case CEE_MONO_SAVE_LMF:
10031 case CEE_MONO_RESTORE_LMF:
10032 #ifdef MONO_ARCH_HAVE_LMF_OPS
10033 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10034 MONO_ADD_INS (bblock, ins);
10035 cfg->need_lmf_area = TRUE;
10039 case CEE_MONO_CLASSCONST:
10040 CHECK_STACK_OVF (1);
10042 token = read32 (ip + 2);
10043 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10046 inline_costs += 10 * num_calls++;
10048 case CEE_MONO_NOT_TAKEN:
10049 bblock->out_of_line = TRUE;
10053 CHECK_STACK_OVF (1);
10055 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
10056 ins->dreg = alloc_preg (cfg);
10057 ins->inst_offset = (gint32)read32 (ip + 2);
10058 ins->type = STACK_PTR;
10059 MONO_ADD_INS (bblock, ins);
10063 case CEE_MONO_DYN_CALL: {
10064 MonoCallInst *call;
10066 /* It would be easier to call a trampoline, but that would put an
10067 * extra frame on the stack, confusing exception handling. So
10068 * implement it inline using an opcode for now.
10071 if (!cfg->dyn_call_var) {
10072 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10073 /* prevent it from being register allocated */
10074 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10077 /* Has to use a call inst since it local regalloc expects it */
10078 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10079 ins = (MonoInst*)call;
10081 ins->sreg1 = sp [0]->dreg;
10082 ins->sreg2 = sp [1]->dreg;
10083 MONO_ADD_INS (bblock, ins);
10085 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10086 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10090 inline_costs += 10 * num_calls++;
10094 case CEE_MONO_MEMORY_BARRIER: {
10096 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10101 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10107 case CEE_PREFIX1: {
10110 case CEE_ARGLIST: {
10111 /* somewhat similar to LDTOKEN */
10112 MonoInst *addr, *vtvar;
10113 CHECK_STACK_OVF (1);
10114 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10116 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10117 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10119 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10120 ins->type = STACK_VTYPE;
10121 ins->klass = mono_defaults.argumenthandle_class;
10134 * The following transforms:
10135 * CEE_CEQ into OP_CEQ
10136 * CEE_CGT into OP_CGT
10137 * CEE_CGT_UN into OP_CGT_UN
10138 * CEE_CLT into OP_CLT
10139 * CEE_CLT_UN into OP_CLT_UN
10141 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10143 MONO_INST_NEW (cfg, ins, cmp->opcode);
10145 cmp->sreg1 = sp [0]->dreg;
10146 cmp->sreg2 = sp [1]->dreg;
10147 type_from_op (cmp, sp [0], sp [1]);
10149 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10150 cmp->opcode = OP_LCOMPARE;
10151 else if (sp [0]->type == STACK_R8)
10152 cmp->opcode = OP_FCOMPARE;
10154 cmp->opcode = OP_ICOMPARE;
10155 MONO_ADD_INS (bblock, cmp);
10156 ins->type = STACK_I4;
10157 ins->dreg = alloc_dreg (cfg, ins->type);
10158 type_from_op (ins, sp [0], sp [1]);
10160 if (cmp->opcode == OP_FCOMPARE) {
10162 * The backends expect the fceq opcodes to do the
10165 cmp->opcode = OP_NOP;
10166 ins->sreg1 = cmp->sreg1;
10167 ins->sreg2 = cmp->sreg2;
10169 MONO_ADD_INS (bblock, ins);
10175 MonoInst *argconst;
10176 MonoMethod *cil_method;
10178 CHECK_STACK_OVF (1);
10180 n = read32 (ip + 2);
10181 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10182 if (!cmethod || mono_loader_get_last_error ())
10184 mono_class_init (cmethod->klass);
10186 mono_save_token_info (cfg, image, n, cmethod);
10188 if (cfg->generic_sharing_context)
10189 context_used = mono_method_check_context_used (cmethod);
10191 cil_method = cmethod;
10192 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10193 METHOD_ACCESS_FAILURE;
10195 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10196 if (check_linkdemand (cfg, method, cmethod))
10198 CHECK_CFG_EXCEPTION;
10199 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10200 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10204 * Optimize the common case of ldftn+delegate creation
10206 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10207 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10208 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10209 MonoInst *target_ins;
10210 MonoMethod *invoke;
10211 int invoke_context_used = 0;
10213 invoke = mono_get_delegate_invoke (ctor_method->klass);
10214 if (!invoke || !mono_method_signature (invoke))
10217 if (cfg->generic_sharing_context)
10218 invoke_context_used = mono_method_check_context_used (invoke);
10220 target_ins = sp [-1];
10222 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10223 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10225 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10226 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10227 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10228 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10229 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10233 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10234 /* FIXME: SGEN support */
10235 if (invoke_context_used == 0) {
10237 if (cfg->verbose_level > 3)
10238 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10240 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10241 CHECK_CFG_EXCEPTION;
10250 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10251 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10255 inline_costs += 10 * num_calls++;
10258 case CEE_LDVIRTFTN: {
10259 MonoInst *args [2];
10263 n = read32 (ip + 2);
10264 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10265 if (!cmethod || mono_loader_get_last_error ())
10267 mono_class_init (cmethod->klass);
10269 if (cfg->generic_sharing_context)
10270 context_used = mono_method_check_context_used (cmethod);
10272 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10273 if (check_linkdemand (cfg, method, cmethod))
10275 CHECK_CFG_EXCEPTION;
10276 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10277 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10283 args [1] = emit_get_rgctx_method (cfg, context_used,
10284 cmethod, MONO_RGCTX_INFO_METHOD);
10287 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10289 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10292 inline_costs += 10 * num_calls++;
10296 CHECK_STACK_OVF (1);
10298 n = read16 (ip + 2);
10300 EMIT_NEW_ARGLOAD (cfg, ins, n);
10305 CHECK_STACK_OVF (1);
10307 n = read16 (ip + 2);
10309 NEW_ARGLOADA (cfg, ins, n);
10310 MONO_ADD_INS (cfg->cbb, ins);
10318 n = read16 (ip + 2);
10320 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10322 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10326 CHECK_STACK_OVF (1);
10328 n = read16 (ip + 2);
10330 EMIT_NEW_LOCLOAD (cfg, ins, n);
10335 unsigned char *tmp_ip;
10336 CHECK_STACK_OVF (1);
10338 n = read16 (ip + 2);
10341 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10347 EMIT_NEW_LOCLOADA (cfg, ins, n);
10356 n = read16 (ip + 2);
10358 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10360 emit_stloc_ir (cfg, sp, header, n);
10367 if (sp != stack_start)
10369 if (cfg->method != method)
10371 * Inlining this into a loop in a parent could lead to
10372 * stack overflows which is different behavior than the
10373 * non-inlined case, thus disable inlining in this case.
10375 goto inline_failure;
10377 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10378 ins->dreg = alloc_preg (cfg);
10379 ins->sreg1 = sp [0]->dreg;
10380 ins->type = STACK_PTR;
10381 MONO_ADD_INS (cfg->cbb, ins);
10383 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10385 ins->flags |= MONO_INST_INIT;
10390 case CEE_ENDFILTER: {
10391 MonoExceptionClause *clause, *nearest;
10392 int cc, nearest_num;
10396 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10398 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10399 ins->sreg1 = (*sp)->dreg;
10400 MONO_ADD_INS (bblock, ins);
10401 start_new_bblock = 1;
10406 for (cc = 0; cc < header->num_clauses; ++cc) {
10407 clause = &header->clauses [cc];
10408 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10409 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10410 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10415 g_assert (nearest);
10416 if ((ip - header->code) != nearest->handler_offset)
10421 case CEE_UNALIGNED_:
10422 ins_flag |= MONO_INST_UNALIGNED;
10423 /* FIXME: record alignment? we can assume 1 for now */
10427 case CEE_VOLATILE_:
10428 ins_flag |= MONO_INST_VOLATILE;
10432 ins_flag |= MONO_INST_TAILCALL;
10433 cfg->flags |= MONO_CFG_HAS_TAIL;
10434 /* Can't inline tail calls at this time */
10435 inline_costs += 100000;
10442 token = read32 (ip + 2);
10443 klass = mini_get_class (method, token, generic_context);
10444 CHECK_TYPELOAD (klass);
10445 if (generic_class_is_reference_type (cfg, klass))
10446 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10448 mini_emit_initobj (cfg, *sp, NULL, klass);
10452 case CEE_CONSTRAINED_:
10454 token = read32 (ip + 2);
10455 if (method->wrapper_type != MONO_WRAPPER_NONE)
10456 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10458 constrained_call = mono_class_get_full (image, token, generic_context);
10459 CHECK_TYPELOAD (constrained_call);
10463 case CEE_INITBLK: {
10464 MonoInst *iargs [3];
10468 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10469 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10470 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10471 /* emit_memset only works when val == 0 */
10472 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10474 iargs [0] = sp [0];
10475 iargs [1] = sp [1];
10476 iargs [2] = sp [2];
10477 if (ip [1] == CEE_CPBLK) {
10478 MonoMethod *memcpy_method = get_memcpy_method ();
10479 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10481 MonoMethod *memset_method = get_memset_method ();
10482 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10492 ins_flag |= MONO_INST_NOTYPECHECK;
10494 ins_flag |= MONO_INST_NORANGECHECK;
10495 /* we ignore the no-nullcheck for now since we
10496 * really do it explicitly only when doing callvirt->call
10500 case CEE_RETHROW: {
10502 int handler_offset = -1;
10504 for (i = 0; i < header->num_clauses; ++i) {
10505 MonoExceptionClause *clause = &header->clauses [i];
10506 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10507 handler_offset = clause->handler_offset;
10512 bblock->flags |= BB_EXCEPTION_UNSAFE;
10514 g_assert (handler_offset != -1);
10516 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10517 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10518 ins->sreg1 = load->dreg;
10519 MONO_ADD_INS (bblock, ins);
10521 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10522 MONO_ADD_INS (bblock, ins);
10525 link_bblock (cfg, bblock, end_bblock);
10526 start_new_bblock = 1;
10534 CHECK_STACK_OVF (1);
10536 token = read32 (ip + 2);
10537 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10538 MonoType *type = mono_type_create_from_typespec (image, token);
10539 token = mono_type_size (type, &ialign);
10541 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10542 CHECK_TYPELOAD (klass);
10543 mono_class_init (klass);
10544 token = mono_class_value_size (klass, &align);
10546 EMIT_NEW_ICONST (cfg, ins, token);
10551 case CEE_REFANYTYPE: {
10552 MonoInst *src_var, *src;
10558 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10560 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10561 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10562 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10567 case CEE_READONLY_:
10580 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10590 g_warning ("opcode 0x%02x not handled", *ip);
10594 if (start_new_bblock != 1)
10597 bblock->cil_length = ip - bblock->cil_code;
10598 if (bblock->next_bb) {
10599 /* This could already be set because of inlining, #693905 */
10600 MonoBasicBlock *bb = bblock;
10602 while (bb->next_bb)
10604 bb->next_bb = end_bblock;
10606 bblock->next_bb = end_bblock;
10609 if (cfg->method == method && cfg->domainvar) {
10611 MonoInst *get_domain;
10613 cfg->cbb = init_localsbb;
10615 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10616 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10619 get_domain->dreg = alloc_preg (cfg);
10620 MONO_ADD_INS (cfg->cbb, get_domain);
10622 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10623 MONO_ADD_INS (cfg->cbb, store);
10626 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10627 if (cfg->compile_aot)
10628 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10629 mono_get_got_var (cfg);
10632 if (cfg->method == method && cfg->got_var)
10633 mono_emit_load_got_addr (cfg);
10638 cfg->cbb = init_localsbb;
10640 for (i = 0; i < header->num_locals; ++i) {
10641 MonoType *ptype = header->locals [i];
10642 int t = ptype->type;
10643 dreg = cfg->locals [i]->dreg;
10645 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10646 t = mono_class_enum_basetype (ptype->data.klass)->type;
10647 if (ptype->byref) {
10648 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10649 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10650 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10651 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10652 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10653 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10654 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10655 ins->type = STACK_R8;
10656 ins->inst_p0 = (void*)&r8_0;
10657 ins->dreg = alloc_dreg (cfg, STACK_R8);
10658 MONO_ADD_INS (init_localsbb, ins);
10659 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10660 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10661 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10662 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10664 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10669 if (cfg->init_ref_vars && cfg->method == method) {
10670 /* Emit initialization for ref vars */
10671 // FIXME: Avoid duplication initialization for IL locals.
10672 for (i = 0; i < cfg->num_varinfo; ++i) {
10673 MonoInst *ins = cfg->varinfo [i];
10675 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10676 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10681 MonoBasicBlock *bb;
10684 * Make seq points at backward branch targets interruptable.
10686 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
10687 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
10688 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
10691 /* Add a sequence point for method entry/exit events */
10693 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10694 MONO_ADD_INS (init_localsbb, ins);
10695 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10696 MONO_ADD_INS (cfg->bb_exit, ins);
10701 if (cfg->method == method) {
10702 MonoBasicBlock *bb;
10703 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10704 bb->region = mono_find_block_region (cfg, bb->real_offset);
10706 mono_create_spvar_for_region (cfg, bb->region);
10707 if (cfg->verbose_level > 2)
10708 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10712 g_slist_free (class_inits);
10713 dont_inline = g_list_remove (dont_inline, method);
10715 if (inline_costs < 0) {
10718 /* Method is too large */
10719 mname = mono_method_full_name (method, TRUE);
10720 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10721 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10723 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10724 mono_basic_block_free (original_bb);
10728 if ((cfg->verbose_level > 2) && (cfg->method == method))
10729 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10731 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10732 mono_basic_block_free (original_bb);
10733 return inline_costs;
10736 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10743 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10747 set_exception_type_from_invalid_il (cfg, method, ip);
10751 g_slist_free (class_inits);
10752 mono_basic_block_free (original_bb);
10753 dont_inline = g_list_remove (dont_inline, method);
10754 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10759 store_membase_reg_to_store_membase_imm (int opcode)
10762 case OP_STORE_MEMBASE_REG:
10763 return OP_STORE_MEMBASE_IMM;
10764 case OP_STOREI1_MEMBASE_REG:
10765 return OP_STOREI1_MEMBASE_IMM;
10766 case OP_STOREI2_MEMBASE_REG:
10767 return OP_STOREI2_MEMBASE_IMM;
10768 case OP_STOREI4_MEMBASE_REG:
10769 return OP_STOREI4_MEMBASE_IMM;
10770 case OP_STOREI8_MEMBASE_REG:
10771 return OP_STOREI8_MEMBASE_IMM;
10773 g_assert_not_reached ();
10779 #endif /* DISABLE_JIT */
10782 mono_op_to_op_imm (int opcode)
10786 return OP_IADD_IMM;
10788 return OP_ISUB_IMM;
10790 return OP_IDIV_IMM;
10792 return OP_IDIV_UN_IMM;
10794 return OP_IREM_IMM;
10796 return OP_IREM_UN_IMM;
10798 return OP_IMUL_IMM;
10800 return OP_IAND_IMM;
10804 return OP_IXOR_IMM;
10806 return OP_ISHL_IMM;
10808 return OP_ISHR_IMM;
10810 return OP_ISHR_UN_IMM;
10813 return OP_LADD_IMM;
10815 return OP_LSUB_IMM;
10817 return OP_LAND_IMM;
10821 return OP_LXOR_IMM;
10823 return OP_LSHL_IMM;
10825 return OP_LSHR_IMM;
10827 return OP_LSHR_UN_IMM;
10830 return OP_COMPARE_IMM;
10832 return OP_ICOMPARE_IMM;
10834 return OP_LCOMPARE_IMM;
10836 case OP_STORE_MEMBASE_REG:
10837 return OP_STORE_MEMBASE_IMM;
10838 case OP_STOREI1_MEMBASE_REG:
10839 return OP_STOREI1_MEMBASE_IMM;
10840 case OP_STOREI2_MEMBASE_REG:
10841 return OP_STOREI2_MEMBASE_IMM;
10842 case OP_STOREI4_MEMBASE_REG:
10843 return OP_STOREI4_MEMBASE_IMM;
10845 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10847 return OP_X86_PUSH_IMM;
10848 case OP_X86_COMPARE_MEMBASE_REG:
10849 return OP_X86_COMPARE_MEMBASE_IMM;
10851 #if defined(TARGET_AMD64)
10852 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10853 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10855 case OP_VOIDCALL_REG:
10856 return OP_VOIDCALL;
10864 return OP_LOCALLOC_IMM;
10871 ldind_to_load_membase (int opcode)
10875 return OP_LOADI1_MEMBASE;
10877 return OP_LOADU1_MEMBASE;
10879 return OP_LOADI2_MEMBASE;
10881 return OP_LOADU2_MEMBASE;
10883 return OP_LOADI4_MEMBASE;
10885 return OP_LOADU4_MEMBASE;
10887 return OP_LOAD_MEMBASE;
10888 case CEE_LDIND_REF:
10889 return OP_LOAD_MEMBASE;
10891 return OP_LOADI8_MEMBASE;
10893 return OP_LOADR4_MEMBASE;
10895 return OP_LOADR8_MEMBASE;
10897 g_assert_not_reached ();
10904 stind_to_store_membase (int opcode)
10908 return OP_STOREI1_MEMBASE_REG;
10910 return OP_STOREI2_MEMBASE_REG;
10912 return OP_STOREI4_MEMBASE_REG;
10914 case CEE_STIND_REF:
10915 return OP_STORE_MEMBASE_REG;
10917 return OP_STOREI8_MEMBASE_REG;
10919 return OP_STORER4_MEMBASE_REG;
10921 return OP_STORER8_MEMBASE_REG;
10923 g_assert_not_reached ();
10930 mono_load_membase_to_load_mem (int opcode)
10932 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10933 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10935 case OP_LOAD_MEMBASE:
10936 return OP_LOAD_MEM;
10937 case OP_LOADU1_MEMBASE:
10938 return OP_LOADU1_MEM;
10939 case OP_LOADU2_MEMBASE:
10940 return OP_LOADU2_MEM;
10941 case OP_LOADI4_MEMBASE:
10942 return OP_LOADI4_MEM;
10943 case OP_LOADU4_MEMBASE:
10944 return OP_LOADU4_MEM;
10945 #if SIZEOF_REGISTER == 8
10946 case OP_LOADI8_MEMBASE:
10947 return OP_LOADI8_MEM;
10956 op_to_op_dest_membase (int store_opcode, int opcode)
10958 #if defined(TARGET_X86)
10959 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10964 return OP_X86_ADD_MEMBASE_REG;
10966 return OP_X86_SUB_MEMBASE_REG;
10968 return OP_X86_AND_MEMBASE_REG;
10970 return OP_X86_OR_MEMBASE_REG;
10972 return OP_X86_XOR_MEMBASE_REG;
10975 return OP_X86_ADD_MEMBASE_IMM;
10978 return OP_X86_SUB_MEMBASE_IMM;
10981 return OP_X86_AND_MEMBASE_IMM;
10984 return OP_X86_OR_MEMBASE_IMM;
10987 return OP_X86_XOR_MEMBASE_IMM;
10993 #if defined(TARGET_AMD64)
10994 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10999 return OP_X86_ADD_MEMBASE_REG;
11001 return OP_X86_SUB_MEMBASE_REG;
11003 return OP_X86_AND_MEMBASE_REG;
11005 return OP_X86_OR_MEMBASE_REG;
11007 return OP_X86_XOR_MEMBASE_REG;
11009 return OP_X86_ADD_MEMBASE_IMM;
11011 return OP_X86_SUB_MEMBASE_IMM;
11013 return OP_X86_AND_MEMBASE_IMM;
11015 return OP_X86_OR_MEMBASE_IMM;
11017 return OP_X86_XOR_MEMBASE_IMM;
11019 return OP_AMD64_ADD_MEMBASE_REG;
11021 return OP_AMD64_SUB_MEMBASE_REG;
11023 return OP_AMD64_AND_MEMBASE_REG;
11025 return OP_AMD64_OR_MEMBASE_REG;
11027 return OP_AMD64_XOR_MEMBASE_REG;
11030 return OP_AMD64_ADD_MEMBASE_IMM;
11033 return OP_AMD64_SUB_MEMBASE_IMM;
11036 return OP_AMD64_AND_MEMBASE_IMM;
11039 return OP_AMD64_OR_MEMBASE_IMM;
11042 return OP_AMD64_XOR_MEMBASE_IMM;
11052 op_to_op_store_membase (int store_opcode, int opcode)
11054 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11057 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11058 return OP_X86_SETEQ_MEMBASE;
11060 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11061 return OP_X86_SETNE_MEMBASE;
11069 op_to_op_src1_membase (int load_opcode, int opcode)
11072 /* FIXME: This has sign extension issues */
11074 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11075 return OP_X86_COMPARE_MEMBASE8_IMM;
11078 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11083 return OP_X86_PUSH_MEMBASE;
11084 case OP_COMPARE_IMM:
11085 case OP_ICOMPARE_IMM:
11086 return OP_X86_COMPARE_MEMBASE_IMM;
11089 return OP_X86_COMPARE_MEMBASE_REG;
11093 #ifdef TARGET_AMD64
11094 /* FIXME: This has sign extension issues */
11096 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11097 return OP_X86_COMPARE_MEMBASE8_IMM;
11102 #ifdef __mono_ilp32__
11103 if (load_opcode == OP_LOADI8_MEMBASE)
11105 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11107 return OP_X86_PUSH_MEMBASE;
11109 /* FIXME: This only works for 32 bit immediates
11110 case OP_COMPARE_IMM:
11111 case OP_LCOMPARE_IMM:
11112 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11113 return OP_AMD64_COMPARE_MEMBASE_IMM;
11115 case OP_ICOMPARE_IMM:
11116 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11117 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11121 #ifdef __mono_ilp32__
11122 if (load_opcode == OP_LOAD_MEMBASE)
11123 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11124 if (load_opcode == OP_LOADI8_MEMBASE)
11126 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11128 return OP_AMD64_COMPARE_MEMBASE_REG;
11131 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11132 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11141 op_to_op_src2_membase (int load_opcode, int opcode)
11144 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11150 return OP_X86_COMPARE_REG_MEMBASE;
11152 return OP_X86_ADD_REG_MEMBASE;
11154 return OP_X86_SUB_REG_MEMBASE;
11156 return OP_X86_AND_REG_MEMBASE;
11158 return OP_X86_OR_REG_MEMBASE;
11160 return OP_X86_XOR_REG_MEMBASE;
11164 #ifdef TARGET_AMD64
11165 #ifdef __mono_ilp32__
11166 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
11168 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
11172 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11174 return OP_X86_ADD_REG_MEMBASE;
11176 return OP_X86_SUB_REG_MEMBASE;
11178 return OP_X86_AND_REG_MEMBASE;
11180 return OP_X86_OR_REG_MEMBASE;
11182 return OP_X86_XOR_REG_MEMBASE;
11184 #ifdef __mono_ilp32__
11185 } else if (load_opcode == OP_LOADI8_MEMBASE) {
11187 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
11192 return OP_AMD64_COMPARE_REG_MEMBASE;
11194 return OP_AMD64_ADD_REG_MEMBASE;
11196 return OP_AMD64_SUB_REG_MEMBASE;
11198 return OP_AMD64_AND_REG_MEMBASE;
11200 return OP_AMD64_OR_REG_MEMBASE;
11202 return OP_AMD64_XOR_REG_MEMBASE;
11211 mono_op_to_op_imm_noemul (int opcode)
11214 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11220 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11228 return mono_op_to_op_imm (opcode);
11232 #ifndef DISABLE_JIT
11235 * mono_handle_global_vregs:
11237 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11241 mono_handle_global_vregs (MonoCompile *cfg)
11243 gint32 *vreg_to_bb;
11244 MonoBasicBlock *bb;
11247 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11249 #ifdef MONO_ARCH_SIMD_INTRINSICS
11250 if (cfg->uses_simd_intrinsics)
11251 mono_simd_simplify_indirection (cfg);
11254 /* Find local vregs used in more than one bb */
11255 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11256 MonoInst *ins = bb->code;
11257 int block_num = bb->block_num;
11259 if (cfg->verbose_level > 2)
11260 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11263 for (; ins; ins = ins->next) {
11264 const char *spec = INS_INFO (ins->opcode);
11265 int regtype = 0, regindex;
11268 if (G_UNLIKELY (cfg->verbose_level > 2))
11269 mono_print_ins (ins);
11271 g_assert (ins->opcode >= MONO_CEE_LAST);
11273 for (regindex = 0; regindex < 4; regindex ++) {
11276 if (regindex == 0) {
11277 regtype = spec [MONO_INST_DEST];
11278 if (regtype == ' ')
11281 } else if (regindex == 1) {
11282 regtype = spec [MONO_INST_SRC1];
11283 if (regtype == ' ')
11286 } else if (regindex == 2) {
11287 regtype = spec [MONO_INST_SRC2];
11288 if (regtype == ' ')
11291 } else if (regindex == 3) {
11292 regtype = spec [MONO_INST_SRC3];
11293 if (regtype == ' ')
11298 #if SIZEOF_REGISTER == 4
11299 /* In the LLVM case, the long opcodes are not decomposed */
11300 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11302 * Since some instructions reference the original long vreg,
11303 * and some reference the two component vregs, it is quite hard
11304 * to determine when it needs to be global. So be conservative.
11306 if (!get_vreg_to_inst (cfg, vreg)) {
11307 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11309 if (cfg->verbose_level > 2)
11310 printf ("LONG VREG R%d made global.\n", vreg);
11314 * Make the component vregs volatile since the optimizations can
11315 * get confused otherwise.
11317 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11318 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11322 g_assert (vreg != -1);
11324 prev_bb = vreg_to_bb [vreg];
11325 if (prev_bb == 0) {
11326 /* 0 is a valid block num */
11327 vreg_to_bb [vreg] = block_num + 1;
11328 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11329 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11332 if (!get_vreg_to_inst (cfg, vreg)) {
11333 if (G_UNLIKELY (cfg->verbose_level > 2))
11334 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11338 if (vreg_is_ref (cfg, vreg))
11339 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11341 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11344 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11347 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11350 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11353 g_assert_not_reached ();
11357 /* Flag as having been used in more than one bb */
11358 vreg_to_bb [vreg] = -1;
11364 /* If a variable is used in only one bblock, convert it into a local vreg */
11365 for (i = 0; i < cfg->num_varinfo; i++) {
11366 MonoInst *var = cfg->varinfo [i];
11367 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11369 switch (var->type) {
11375 #if SIZEOF_REGISTER == 8
11378 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11379 /* Enabling this screws up the fp stack on x86 */
11382 /* Arguments are implicitly global */
11383 /* Putting R4 vars into registers doesn't work currently */
11384 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11386 * Make that the variable's liveness interval doesn't contain a call, since
11387 * that would cause the lvreg to be spilled, making the whole optimization
11390 /* This is too slow for JIT compilation */
11392 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11394 int def_index, call_index, ins_index;
11395 gboolean spilled = FALSE;
11400 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11401 const char *spec = INS_INFO (ins->opcode);
11403 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11404 def_index = ins_index;
11406 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11407 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11408 if (call_index > def_index) {
11414 if (MONO_IS_CALL (ins))
11415 call_index = ins_index;
11425 if (G_UNLIKELY (cfg->verbose_level > 2))
11426 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11427 var->flags |= MONO_INST_IS_DEAD;
11428 cfg->vreg_to_inst [var->dreg] = NULL;
11435 * Compress the varinfo and vars tables so the liveness computation is faster and
11436 * takes up less space.
11439 for (i = 0; i < cfg->num_varinfo; ++i) {
11440 MonoInst *var = cfg->varinfo [i];
11441 if (pos < i && cfg->locals_start == i)
11442 cfg->locals_start = pos;
11443 if (!(var->flags & MONO_INST_IS_DEAD)) {
11445 cfg->varinfo [pos] = cfg->varinfo [i];
11446 cfg->varinfo [pos]->inst_c0 = pos;
11447 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11448 cfg->vars [pos].idx = pos;
11449 #if SIZEOF_REGISTER == 4
11450 if (cfg->varinfo [pos]->type == STACK_I8) {
11451 /* Modify the two component vars too */
11454 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11455 var1->inst_c0 = pos;
11456 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11457 var1->inst_c0 = pos;
11464 cfg->num_varinfo = pos;
11465 if (cfg->locals_start > cfg->num_varinfo)
11466 cfg->locals_start = cfg->num_varinfo;
11470 * mono_spill_global_vars:
11472 * Generate spill code for variables which are not allocated to registers,
11473 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11474 * code is generated which could be optimized by the local optimization passes.
11477 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11479 MonoBasicBlock *bb;
11481 int orig_next_vreg;
11482 guint32 *vreg_to_lvreg;
11484 guint32 i, lvregs_len;
11485 gboolean dest_has_lvreg = FALSE;
11486 guint32 stacktypes [128];
11487 MonoInst **live_range_start, **live_range_end;
11488 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11490 *need_local_opts = FALSE;
11492 memset (spec2, 0, sizeof (spec2));
11494 /* FIXME: Move this function to mini.c */
11495 stacktypes ['i'] = STACK_PTR;
11496 stacktypes ['l'] = STACK_I8;
11497 stacktypes ['f'] = STACK_R8;
11498 #ifdef MONO_ARCH_SIMD_INTRINSICS
11499 stacktypes ['x'] = STACK_VTYPE;
11502 #if SIZEOF_REGISTER == 4
11503 /* Create MonoInsts for longs */
11504 for (i = 0; i < cfg->num_varinfo; i++) {
11505 MonoInst *ins = cfg->varinfo [i];
11507 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11508 switch (ins->type) {
11513 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11516 g_assert (ins->opcode == OP_REGOFFSET);
11518 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11520 tree->opcode = OP_REGOFFSET;
11521 tree->inst_basereg = ins->inst_basereg;
11522 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11524 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11526 tree->opcode = OP_REGOFFSET;
11527 tree->inst_basereg = ins->inst_basereg;
11528 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11538 if (cfg->compute_gc_maps) {
11539 /* registers need liveness info even for !non refs */
11540 for (i = 0; i < cfg->num_varinfo; i++) {
11541 MonoInst *ins = cfg->varinfo [i];
11543 if (ins->opcode == OP_REGVAR)
11544 ins->flags |= MONO_INST_GC_TRACK;
11548 /* FIXME: widening and truncation */
11551 * As an optimization, when a variable allocated to the stack is first loaded into
11552 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11553 * the variable again.
11555 orig_next_vreg = cfg->next_vreg;
11556 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11557 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11561 * These arrays contain the first and last instructions accessing a given
11563 * Since we emit bblocks in the same order we process them here, and we
11564 * don't split live ranges, these will precisely describe the live range of
11565 * the variable, i.e. the instruction range where a valid value can be found
11566 * in the variables location.
11567 * The live range is computed using the liveness info computed by the liveness pass.
11568 * We can't use vmv->range, since that is an abstract live range, and we need
11569 * one which is instruction precise.
11570 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11572 /* FIXME: Only do this if debugging info is requested */
11573 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11574 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11575 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11576 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11578 /* Add spill loads/stores */
11579 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11582 if (cfg->verbose_level > 2)
11583 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11585 /* Clear vreg_to_lvreg array */
11586 for (i = 0; i < lvregs_len; i++)
11587 vreg_to_lvreg [lvregs [i]] = 0;
11591 MONO_BB_FOR_EACH_INS (bb, ins) {
11592 const char *spec = INS_INFO (ins->opcode);
11593 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11594 gboolean store, no_lvreg;
11595 int sregs [MONO_MAX_SRC_REGS];
11597 if (G_UNLIKELY (cfg->verbose_level > 2))
11598 mono_print_ins (ins);
11600 if (ins->opcode == OP_NOP)
11604 * We handle LDADDR here as well, since it can only be decomposed
11605 * when variable addresses are known.
11607 if (ins->opcode == OP_LDADDR) {
11608 MonoInst *var = ins->inst_p0;
11610 if (var->opcode == OP_VTARG_ADDR) {
11611 /* Happens on SPARC/S390 where vtypes are passed by reference */
11612 MonoInst *vtaddr = var->inst_left;
11613 if (vtaddr->opcode == OP_REGVAR) {
11614 ins->opcode = OP_MOVE;
11615 ins->sreg1 = vtaddr->dreg;
11617 else if (var->inst_left->opcode == OP_REGOFFSET) {
11618 ins->opcode = OP_LOAD_MEMBASE;
11619 ins->inst_basereg = vtaddr->inst_basereg;
11620 ins->inst_offset = vtaddr->inst_offset;
11624 g_assert (var->opcode == OP_REGOFFSET);
11626 ins->opcode = OP_ADD_IMM;
11627 ins->sreg1 = var->inst_basereg;
11628 ins->inst_imm = var->inst_offset;
11631 *need_local_opts = TRUE;
11632 spec = INS_INFO (ins->opcode);
11635 if (ins->opcode < MONO_CEE_LAST) {
11636 mono_print_ins (ins);
11637 g_assert_not_reached ();
11641 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11645 if (MONO_IS_STORE_MEMBASE (ins)) {
11646 tmp_reg = ins->dreg;
11647 ins->dreg = ins->sreg2;
11648 ins->sreg2 = tmp_reg;
11651 spec2 [MONO_INST_DEST] = ' ';
11652 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11653 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11654 spec2 [MONO_INST_SRC3] = ' ';
11656 } else if (MONO_IS_STORE_MEMINDEX (ins))
11657 g_assert_not_reached ();
11662 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11663 printf ("\t %.3s %d", spec, ins->dreg);
11664 num_sregs = mono_inst_get_src_registers (ins, sregs);
11665 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
11666 printf (" %d", sregs [srcindex]);
11673 regtype = spec [MONO_INST_DEST];
11674 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11677 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11678 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11679 MonoInst *store_ins;
11681 MonoInst *def_ins = ins;
11682 int dreg = ins->dreg; /* The original vreg */
11684 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11686 if (var->opcode == OP_REGVAR) {
11687 ins->dreg = var->dreg;
11688 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11690 * Instead of emitting a load+store, use a _membase opcode.
11692 g_assert (var->opcode == OP_REGOFFSET);
11693 if (ins->opcode == OP_MOVE) {
11697 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11698 ins->inst_basereg = var->inst_basereg;
11699 ins->inst_offset = var->inst_offset;
11702 spec = INS_INFO (ins->opcode);
11706 g_assert (var->opcode == OP_REGOFFSET);
11708 prev_dreg = ins->dreg;
11710 /* Invalidate any previous lvreg for this vreg */
11711 vreg_to_lvreg [ins->dreg] = 0;
11715 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11717 store_opcode = OP_STOREI8_MEMBASE_REG;
11720 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11722 if (regtype == 'l') {
11723 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11724 mono_bblock_insert_after_ins (bb, ins, store_ins);
11725 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11726 mono_bblock_insert_after_ins (bb, ins, store_ins);
11727 def_ins = store_ins;
11730 g_assert (store_opcode != OP_STOREV_MEMBASE);
11732 /* Try to fuse the store into the instruction itself */
11733 /* FIXME: Add more instructions */
11734 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11735 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11736 ins->inst_imm = ins->inst_c0;
11737 ins->inst_destbasereg = var->inst_basereg;
11738 ins->inst_offset = var->inst_offset;
11739 spec = INS_INFO (ins->opcode);
11740 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11741 ins->opcode = store_opcode;
11742 ins->inst_destbasereg = var->inst_basereg;
11743 ins->inst_offset = var->inst_offset;
11747 tmp_reg = ins->dreg;
11748 ins->dreg = ins->sreg2;
11749 ins->sreg2 = tmp_reg;
11752 spec2 [MONO_INST_DEST] = ' ';
11753 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11754 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11755 spec2 [MONO_INST_SRC3] = ' ';
11757 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11758 // FIXME: The backends expect the base reg to be in inst_basereg
11759 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11761 ins->inst_basereg = var->inst_basereg;
11762 ins->inst_offset = var->inst_offset;
11763 spec = INS_INFO (ins->opcode);
11765 /* printf ("INS: "); mono_print_ins (ins); */
11766 /* Create a store instruction */
11767 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11769 /* Insert it after the instruction */
11770 mono_bblock_insert_after_ins (bb, ins, store_ins);
11772 def_ins = store_ins;
11775 * We can't assign ins->dreg to var->dreg here, since the
11776 * sregs could use it. So set a flag, and do it after
11779 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11780 dest_has_lvreg = TRUE;
11785 if (def_ins && !live_range_start [dreg]) {
11786 live_range_start [dreg] = def_ins;
11787 live_range_start_bb [dreg] = bb;
11790 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11793 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11794 tmp->inst_c1 = dreg;
11795 mono_bblock_insert_after_ins (bb, def_ins, tmp);
11802 num_sregs = mono_inst_get_src_registers (ins, sregs);
11803 for (srcindex = 0; srcindex < 3; ++srcindex) {
11804 regtype = spec [MONO_INST_SRC1 + srcindex];
11805 sreg = sregs [srcindex];
11807 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11808 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11809 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11810 MonoInst *use_ins = ins;
11811 MonoInst *load_ins;
11812 guint32 load_opcode;
11814 if (var->opcode == OP_REGVAR) {
11815 sregs [srcindex] = var->dreg;
11816 //mono_inst_set_src_registers (ins, sregs);
11817 live_range_end [sreg] = use_ins;
11818 live_range_end_bb [sreg] = bb;
11820 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11823 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11824 /* var->dreg is a hreg */
11825 tmp->inst_c1 = sreg;
11826 mono_bblock_insert_after_ins (bb, ins, tmp);
11832 g_assert (var->opcode == OP_REGOFFSET);
11834 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11836 g_assert (load_opcode != OP_LOADV_MEMBASE);
11838 if (vreg_to_lvreg [sreg]) {
11839 g_assert (vreg_to_lvreg [sreg] != -1);
11841 /* The variable is already loaded to an lvreg */
11842 if (G_UNLIKELY (cfg->verbose_level > 2))
11843 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11844 sregs [srcindex] = vreg_to_lvreg [sreg];
11845 //mono_inst_set_src_registers (ins, sregs);
11849 /* Try to fuse the load into the instruction */
11850 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11851 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11852 sregs [0] = var->inst_basereg;
11853 //mono_inst_set_src_registers (ins, sregs);
11854 ins->inst_offset = var->inst_offset;
11855 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11856 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11857 sregs [1] = var->inst_basereg;
11858 //mono_inst_set_src_registers (ins, sregs);
11859 ins->inst_offset = var->inst_offset;
11861 if (MONO_IS_REAL_MOVE (ins)) {
11862 ins->opcode = OP_NOP;
11865 //printf ("%d ", srcindex); mono_print_ins (ins);
11867 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11869 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11870 if (var->dreg == prev_dreg) {
11872 * sreg refers to the value loaded by the load
11873 * emitted below, but we need to use ins->dreg
11874 * since it refers to the store emitted earlier.
11878 g_assert (sreg != -1);
11879 vreg_to_lvreg [var->dreg] = sreg;
11880 g_assert (lvregs_len < 1024);
11881 lvregs [lvregs_len ++] = var->dreg;
11885 sregs [srcindex] = sreg;
11886 //mono_inst_set_src_registers (ins, sregs);
11888 if (regtype == 'l') {
11889 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11890 mono_bblock_insert_before_ins (bb, ins, load_ins);
11891 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11892 mono_bblock_insert_before_ins (bb, ins, load_ins);
11893 use_ins = load_ins;
11896 #if SIZEOF_REGISTER == 4
11897 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11899 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11900 mono_bblock_insert_before_ins (bb, ins, load_ins);
11901 use_ins = load_ins;
11905 if (var->dreg < orig_next_vreg) {
11906 live_range_end [var->dreg] = use_ins;
11907 live_range_end_bb [var->dreg] = bb;
11910 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11913 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11914 tmp->inst_c1 = var->dreg;
11915 mono_bblock_insert_after_ins (bb, ins, tmp);
11919 mono_inst_set_src_registers (ins, sregs);
11921 if (dest_has_lvreg) {
11922 g_assert (ins->dreg != -1);
11923 vreg_to_lvreg [prev_dreg] = ins->dreg;
11924 g_assert (lvregs_len < 1024);
11925 lvregs [lvregs_len ++] = prev_dreg;
11926 dest_has_lvreg = FALSE;
11930 tmp_reg = ins->dreg;
11931 ins->dreg = ins->sreg2;
11932 ins->sreg2 = tmp_reg;
11935 if (MONO_IS_CALL (ins)) {
11936 /* Clear vreg_to_lvreg array */
11937 for (i = 0; i < lvregs_len; i++)
11938 vreg_to_lvreg [lvregs [i]] = 0;
11940 } else if (ins->opcode == OP_NOP) {
11942 MONO_INST_NULLIFY_SREGS (ins);
11945 if (cfg->verbose_level > 2)
11946 mono_print_ins_index (1, ins);
11949 /* Extend the live range based on the liveness info */
11950 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11951 for (i = 0; i < cfg->num_varinfo; i ++) {
11952 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11954 if (vreg_is_volatile (cfg, vi->vreg))
11955 /* The liveness info is incomplete */
11958 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11959 /* Live from at least the first ins of this bb */
11960 live_range_start [vi->vreg] = bb->code;
11961 live_range_start_bb [vi->vreg] = bb;
11964 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11965 /* Live at least until the last ins of this bb */
11966 live_range_end [vi->vreg] = bb->last_ins;
11967 live_range_end_bb [vi->vreg] = bb;
11973 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11975 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11976 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11978 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11979 for (i = 0; i < cfg->num_varinfo; ++i) {
11980 int vreg = MONO_VARINFO (cfg, i)->vreg;
11983 if (live_range_start [vreg]) {
11984 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11986 ins->inst_c1 = vreg;
11987 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11989 if (live_range_end [vreg]) {
11990 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11992 ins->inst_c1 = vreg;
11993 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11994 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11996 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
12002 g_free (live_range_start);
12003 g_free (live_range_end);
12004 g_free (live_range_start_bb);
12005 g_free (live_range_end_bb);
12010 * - use 'iadd' instead of 'int_add'
12011 * - handling ovf opcodes: decompose in method_to_ir.
12012 * - unify iregs/fregs
12013 * -> partly done, the missing parts are:
12014 * - a more complete unification would involve unifying the hregs as well, so
12015 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12016 * would no longer map to the machine hregs, so the code generators would need to
12017 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12018 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12019 * fp/non-fp branches speeds it up by about 15%.
12020 * - use sext/zext opcodes instead of shifts
12022 * - get rid of TEMPLOADs if possible and use vregs instead
12023 * - clean up usage of OP_P/OP_ opcodes
12024 * - cleanup usage of DUMMY_USE
12025 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12027 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12028 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12029 * - make sure handle_stack_args () is called before the branch is emitted
12030 * - when the new IR is done, get rid of all unused stuff
12031 * - COMPARE/BEQ as separate instructions or unify them ?
12032 * - keeping them separate allows specialized compare instructions like
12033 * compare_imm, compare_membase
12034 * - most back ends unify fp compare+branch, fp compare+ceq
12035 * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
12037 * - handle long shift opts on 32 bit platforms somehow: they require
12038 * 3 sregs (2 for arg1 and 1 for arg2)
12039 * - make byref a 'normal' type.
12040 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12041 * variable if needed.
12042 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12043 * like inline_method.
12044 * - remove inlining restrictions
12045 * - fix LNEG and enable cfold of INEG
12046 * - generalize x86 optimizations like ldelema as a peephole optimization
12047 * - add store_mem_imm for amd64
12048 * - optimize the loading of the interruption flag in the managed->native wrappers
12049 * - avoid special handling of OP_NOP in passes
12050 * - move code inserting instructions into one function/macro.
12051 * - try a coalescing phase after liveness analysis
12052 * - add float -> vreg conversion + local optimizations on !x86
12053 * - figure out how to handle decomposed branches during optimizations, ie.
12054 * compare+branch, op_jump_table+op_br etc.
12055 * - promote RuntimeXHandles to vregs
12056 * - vtype cleanups:
12057 * - add a NEW_VARLOADA_VREG macro
12058 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12059 * accessing vtype fields.
12060 * - get rid of I8CONST on 64 bit platforms
12061 * - dealing with the increase in code size due to branches created during opcode
12063 * - use extended basic blocks
12064 * - all parts of the JIT
12065 * - handle_global_vregs () && local regalloc
12066 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12067 * - sources of increase in code size:
12070 * - isinst and castclass
12071 * - lvregs not allocated to global registers even if used multiple times
12072 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12074 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12075 * - add all micro optimizations from the old JIT
12076 * - put tree optimizations into the deadce pass
12077 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12078 * specific function.
12079 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12080 * fcompare + branchCC.
12081 * - create a helper function for allocating a stack slot, taking into account
12082 * MONO_CFG_HAS_SPILLUP.
12084 * - merge the ia64 switch changes.
12085 * - optimize mono_regstate2_alloc_int/float.
12086 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12087 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12088 * parts of the tree could be separated by other instructions, killing the tree
12089 * arguments, or stores killing loads etc. Also, should we fold loads into other
12090 * instructions if the result of the load is used multiple times ?
12091 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12092 * - LAST MERGE: 108395.
12093 * - when returning vtypes in registers, generate IR and append it to the end of the
12094 * last bb instead of doing it in the epilog.
12095 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12103 - When to decompose opcodes:
12104 - earlier: this makes some optimizations hard to implement, since the low level IR
 no longer contains the necessary information. But it is easier to do.
12106 - later: harder to implement, enables more optimizations.
12107 - Branches inside bblocks:
12108 - created when decomposing complex opcodes.
12109 - branches to another bblock: harmless, but not tracked by the branch
12110 optimizations, so need to branch to a label at the start of the bblock.
12111 - branches to inside the same bblock: very problematic, trips up the local
 reg allocator. Can be fixed by splitting the current bblock, but that is a
12113 complex operation, since some local vregs can become global vregs etc.
12114 - Local/global vregs:
12115 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12116 local register allocator.
12117 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12118 structure, created by mono_create_var (). Assigned to hregs or the stack by
12119 the global register allocator.
12120 - When to do optimizations like alu->alu_imm:
12121 - earlier -> saves work later on since the IR will be smaller/simpler
12122 - later -> can work on more instructions
12123 - Handling of valuetypes:
12124 - When a vtype is pushed on the stack, a new temporary is created, an
12125 instruction computing its address (LDADDR) is emitted and pushed on
12126 the stack. Need to optimize cases when the vtype is used immediately as in
12127 argument passing, stloc etc.
12128 - Instead of the to_end stuff in the old JIT, simply call the function handling
12129 the values on the stack before emitting the last instruction of the bb.
12132 #endif /* DISABLE_JIT */